Skip to content

Commit 2bec72a

Browse files
authored
feat (ai/core): add onError callback to streamText (#4729)
1 parent 8af97d8 commit 2bec72a

File tree

5 files changed

+81
-0
lines changed

5 files changed

+81
-0
lines changed

.changeset/flat-tigers-heal.md

+5
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
---
2+
'ai': patch
3+
---
4+
5+
feat (ai/core): add onError callback to streamText

content/docs/03-ai-sdk-core/05-generating-text.mdx

+19
Original file line numberDiff line numberDiff line change
@@ -94,6 +94,25 @@ It also provides several promises that resolve when the stream is finished:
9494
- `result.finishReason`: The reason the model finished generating text.
9595
- `result.usage`: The usage of the model during text generation.
9696

97+
### `onError` callback
98+
99+
`streamText` immediately starts streaming to enable sending data without waiting for the model.
100+
Errors become part of the stream and are not thrown, to prevent e.g. servers from crashing.
101+
102+
To log errors, you can provide an `onError` callback that is triggered when an error occurs.
103+
104+
```tsx highlight="6-8"
105+
import { streamText } from 'ai';
106+
107+
const result = streamText({
108+
model: yourModel,
109+
prompt: 'Invent a new holiday and describe its traditions.',
110+
onError({ error }) {
111+
console.error(error); // your error logging logic here
112+
},
113+
});
114+
```
115+
97116
### `onChunk` callback
98117

99118
When using `streamText`, you can provide an `onChunk` callback that is triggered for each chunk of the stream.

content/docs/07-reference/01-ai-sdk-core/02-stream-text.mdx

+19
Original file line numberDiff line numberDiff line change
@@ -737,6 +737,25 @@ To see `streamText` in action, check out [these examples](#examples).
737737
},
738738
],
739739
},
740+
{
741+
name: 'onError',
742+
type: '(event: OnErrorResult) => Promise<void> | void',
743+
isOptional: true,
744+
description:
745+
'Callback that is called when an error occurs during streaming. You can use it to log errors.',
746+
properties: [
747+
{
748+
type: 'OnErrorResult',
749+
parameters: [
750+
{
751+
name: 'error',
752+
type: 'unknown',
753+
description: 'The error that occurred.',
754+
},
755+
],
756+
},
757+
],
758+
},
740759
{
741760
name: 'experimental_output',
742761
type: 'Output',

packages/ai/core/generate-text/stream-text.test.ts

+24
Original file line numberDiff line numberDiff line change
@@ -1887,6 +1887,30 @@ describe('streamText', () => {
18871887
});
18881888
});
18891889

1890+
describe('options.onError', () => {
1891+
it('should invoke onError', async () => {
1892+
const result: Array<{ error: unknown }> = [];
1893+
1894+
const { fullStream } = streamText({
1895+
model: new MockLanguageModelV1({
1896+
doStream: async () => {
1897+
throw new Error('test error');
1898+
},
1899+
}),
1900+
prompt: 'test-input',
1901+
onError(event) {
1902+
result.push(event);
1904+
},
1905+
});
1906+
1907+
// consume stream
1908+
await convertAsyncIterableToArray(fullStream);
1909+
1910+
expect(result).toStrictEqual([{ error: new Error('test error') }]);
1911+
});
1912+
});
1913+
18901914
describe('options.onFinish', () => {
18911915
it('should send correct information', async () => {
18921916
let result!: Parameters<

packages/ai/core/generate-text/stream-text.ts

+14
Original file line numberDiff line numberDiff line change
@@ -117,6 +117,7 @@ If set and supported by the model, calls will generate deterministic results.
117117
@param experimental_generateMessageId - Generate a unique ID for each message.
118118
119119
@param onChunk - Callback that is called for each chunk of the stream. The stream processing will pause until the callback promise is resolved.
120+
@param onError - Callback that is called when an error occurs during streaming. You can use it to log errors.
120121
@param onStepFinish - Callback that is called when each step (LLM call) is finished, including intermediate steps.
121122
@param onFinish - Callback that is called when the LLM response and all request tool executions
122123
(for tools that have an `execute` function) are finished.
@@ -151,6 +152,7 @@ export function streamText<
151152
experimental_repairToolCall: repairToolCall,
152153
experimental_transform: transform,
153154
onChunk,
155+
onError,
154156
onFinish,
155157
onStepFinish,
156158
_internal: {
@@ -267,6 +269,11 @@ Callback that is called for each chunk of the stream. The stream processing will
267269
>;
268270
}) => Promise<void> | void;
269271

272+
/**
273+
Callback that is called when an error occurs during streaming. You can use it to log errors.
274+
*/
275+
onError?: (event: { error: unknown }) => Promise<void> | void;
276+
270277
/**
271278
Callback that is called when the LLM response and all request tool executions
272279
(for tools that have an `execute` function) are finished.
@@ -317,6 +324,7 @@ Internal. For test use only. May change without notice.
317324
continueSteps,
318325
providerOptions,
319326
onChunk,
327+
onError,
320328
onFinish,
321329
onStepFinish,
322330
now,
@@ -478,6 +486,7 @@ class DefaultStreamTextResult<TOOLS extends ToolSet, OUTPUT, PARTIAL_OUTPUT>
478486
continueSteps,
479487
providerOptions,
480488
onChunk,
489+
onError,
481490
onFinish,
482491
onStepFinish,
483492
now,
@@ -520,6 +529,7 @@ class DefaultStreamTextResult<TOOLS extends ToolSet, OUTPUT, PARTIAL_OUTPUT>
520529
}
521530
>;
522531
}) => Promise<void> | void);
532+
onError: undefined | ((event: { error: unknown }) => Promise<void> | void);
523533
onFinish:
524534
| undefined
525535
| ((
@@ -588,6 +598,10 @@ class DefaultStreamTextResult<TOOLS extends ToolSet, OUTPUT, PARTIAL_OUTPUT>
588598
await onChunk?.({ chunk: part });
589599
}
590600

601+
if (part.type === 'error') {
602+
await onError?.({ error: part.error });
603+
}
604+
591605
if (part.type === 'text-delta') {
592606
recordedStepText += part.textDelta;
593607
recordedContinuationText += part.textDelta;

0 commit comments

Comments
 (0)