Commit 6ee1f8e: feat (ai/core): add toDataStream to streamText result (#2938)

Authored Sep 9, 2024 · 1 parent a88839b

File tree: 15 files changed, +500 -28 lines
.changeset/late-monkeys-beam.md (new file, +5)

@@ -0,0 +1,5 @@
+---
+'ai': patch
+---
+
+feat (ai/core): add toDataStream to streamText result

content/docs/07-reference/ai-sdk-core/02-stream-text.mdx (+27 -2)

@@ -1171,13 +1171,38 @@ To see `streamText` in action, check out [these examples](#examples).
       ],
     },
     {
-      name: 'toDataStreamResponse',
+      name: 'toDataStream',
       type: '(options?: ToDataStreamOptions) => Response',
+      description: 'Converts the result to a data stream.',
+      properties: [
+        {
+          type: 'ToDataStreamOptions',
+          parameters: [
+            {
+              name: 'data',
+              type: 'StreamData',
+              optional: true,
+              description: 'The stream data object.',
+            },
+            {
+              name: 'getErrorMessage',
+              type: '(error: unknown) => string',
+              description:
+                'A function to get the error message from the error object. By default, all errors are masked as "" for safety reasons.',
+              optional: true,
+            },
+          ],
+        },
+      ],
+    },
+    {
+      name: 'toDataStreamResponse',
+      type: '(options?: ToDataStreamResponseOptions) => Response',
       description:
         'Converts the result to a streamed response object with a stream data part stream. It can be used with the `useChat` and `useCompletion` hooks.',
       properties: [
         {
-          type: 'ToDataStreamOptions',
+          type: 'ToDataStreamResponseOptions',
           parameters: [
             {
               name: 'init',
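For orientation, a minimal sketch of how the two documented options fit together. This snippet is not part of the commit; the fetch-style `POST` handler is an assumption for illustration, and note that `toDataStream` itself returns a `ReadableStream<Uint8Array>` (per the interface change below), which can then be wrapped in a `Response`:

```ts
import { openai } from '@ai-sdk/openai';
import { StreamData, streamText } from 'ai';

export async function POST(_req: Request): Promise<Response> {
  // Optional extra data that will be merged into the data stream.
  const data = new StreamData();
  data.append('request received');

  const result = await streamText({
    model: openai('gpt-4o'),
    prompt: 'Invent a new holiday and describe its traditions.',
    onFinish() {
      data.close();
    },
  });

  const stream = result.toDataStream({
    data,
    // Override the safety default of masking all errors as "".
    getErrorMessage: error =>
      error instanceof Error ? error.message : 'unknown error',
  });

  return new Response(stream, {
    headers: { 'Content-Type': 'text/plain; charset=utf-8' },
  });
}
```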

content/examples/15-api-servers/10-node-js-http-server.mdx (+34 -13)

@@ -5,7 +5,7 @@ description: Example of using Vercel AI SDK in a Node.js HTTP server.
 
 # Node.js HTTP Server
 
-You can use the Vercel AI SDK in a Node.js HTTP server to generate and stream text and objects to the client.
+You can use the Vercel AI SDK in a Node.js HTTP server to generate text and stream it to the client.
 
 ## Examples
 
@@ -20,13 +20,13 @@ curl -X POST http://localhost:8080
 set in the `OPENAI_API_KEY` environment variable.
 </Note>
 
-### Basic
+### Data Stream
 
 You can use the `pipeDataStreamToResponse` method to pipe the stream data to the server response.
 
 ```ts file='index.ts'
 import { openai } from '@ai-sdk/openai';
-import { StreamData, streamText } from 'ai';
+import { streamText } from 'ai';
 import { createServer } from 'http';
 
 createServer(async (req, res) => {
@@ -39,26 +39,47 @@ createServer(async (req, res) => {
 }).listen(8080);
 ```
 
-### With Stream Data
+### Data Stream With Stream Data
+
+`pipeDataStreamToResponse` can be used with `StreamData` to send additional data to the client.
 
 ```ts file='index.ts' highlight="6-7,12-15,18"
+import { openai } from '@ai-sdk/openai';
+import { StreamData, streamText } from 'ai';
+import { createServer } from 'http';
+
+createServer(async (req, res) => {
+  const data = new StreamData();
+  data.append('initialized call');
+
+  const result = await streamText({
+    model: openai('gpt-4o'),
+    prompt: 'Invent a new holiday and describe its traditions.',
+    onFinish() {
+      data.append('call completed');
+      data.close();
+    },
+  });
+
+  result.pipeDataStreamToResponse(res, { data });
+}).listen(8080);
+```
+
+### Text Stream
+
+You can send a text stream to the client using `pipeTextStreamToResponse`.
+
+```ts file='index.ts'
 import { openai } from '@ai-sdk/openai';
-import { StreamData, streamText } from 'ai';
+import { streamText } from 'ai';
 import { createServer } from 'http';
 
 createServer(async (req, res) => {
-  const data = new StreamData();
-  data.append('initialized call');
-
   const result = await streamText({
     model: openai('gpt-4o'),
     prompt: 'Invent a new holiday and describe its traditions.',
-    onFinish() {
-      data.append('call completed');
-      data.close();
-    },
   });
 
-  result.pipeDataStreamToResponse(res, { data });
+  result.pipeTextStreamToResponse(res);
}).listen(8080);
 ```
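To check the text-stream endpoint from a client, here is a small sketch (not part of the commit) that assumes the server above is running on port 8080; `curl -X POST http://localhost:8080` works just as well:

```ts
// Minimal Node 18+ client: POST to the server above and print the
// streamed text chunks as they arrive.
const response = await fetch('http://localhost:8080', { method: 'POST' });

if (response.body == null) {
  throw new Error('response has no body');
}

const reader = response.body.pipeThrough(new TextDecoderStream()).getReader();

while (true) {
  const { done, value } = await reader.read();
  if (done) break;
  process.stdout.write(value);
}
```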
content/examples/15-api-servers/…-hono.mdx (new file, +120)

@@ -0,0 +1,120 @@
+---
+title: Hono
+description: Example of using Vercel AI SDK in a Hono server.
+---
+
+# Hono
+
+You can use the Vercel AI SDK in a Hono server to generate and stream text and objects to the client.
+
+## Examples
+
+The examples start a simple HTTP server that listens on port 8080. You can e.g. test it using `curl`:
+
+```bash
+curl -X POST http://localhost:8080
+```
+
+<Note>
+  The examples use the OpenAI `gpt-4o` model. Ensure that the OpenAI API key is
+  set in the `OPENAI_API_KEY` environment variable.
+</Note>
+
+### Data Stream
+
+You can use the `toDataStream` method to get a data stream from the result and then pipe it to the response.
+
+```ts file='index.ts'
+import { openai } from '@ai-sdk/openai';
+import { serve } from '@hono/node-server';
+import { streamText } from 'ai';
+import { Hono } from 'hono';
+import { stream } from 'hono/streaming';
+
+const app = new Hono();
+
+app.post('/', async c =>
+  stream(c, async stream => {
+    const result = await streamText({
+      model: openai('gpt-4o'),
+      prompt: 'Invent a new holiday and describe its traditions.',
+    });
+
+    // Mark the response as a v1 data stream:
+    c.header('X-Vercel-AI-Data-Stream', 'v1');
+    c.header('Content-Type', 'text/plain; charset=utf-8');
+
+    await stream.pipe(result.toDataStream());
+  }),
+);
+
+serve({ fetch: app.fetch, port: 8080 });
+```
+
+### Data Stream With Stream Data
+
+`toDataStream` can be used with `StreamData` to send additional data to the client.
+
+```ts file='index.ts' highlight="11-13,18-21,28"
+import { openai } from '@ai-sdk/openai';
+import { serve } from '@hono/node-server';
+import { StreamData, streamText } from 'ai';
+import { Hono } from 'hono';
+import { stream } from 'hono/streaming';
+
+const app = new Hono();
+
+app.post('/', async c =>
+  stream(c, async stream => {
+    // use stream data (optional):
+    const data = new StreamData();
+    data.append('initialized call');
+
+    const result = await streamText({
+      model: openai('gpt-4o'),
+      prompt: 'Invent a new holiday and describe its traditions.',
+      onFinish() {
+        data.append('call completed');
+        data.close();
+      },
+    });
+
+    // Mark the response as a v1 data stream:
+    c.header('X-Vercel-AI-Data-Stream', 'v1');
+    c.header('Content-Type', 'text/plain; charset=utf-8');
+
+    await stream.pipe(result.toDataStream({ data }));
+  }),
+);
+
+serve({ fetch: app.fetch, port: 8080 });
+```
+
+### Text Stream
+
+You can use the `toTextStream` method to get a text stream from the result and then pipe it to the response.
+
+```ts file='index.ts'
+import { openai } from '@ai-sdk/openai';
+import { serve } from '@hono/node-server';
+import { streamText } from 'ai';
+import { Hono } from 'hono';
+import { stream } from 'hono/streaming';
+
+const app = new Hono();
+
+app.post('/', async c =>
+  stream(c, async stream => {
+    const result = await streamText({
+      model: openai('gpt-4o'),
+      prompt: 'Invent a new holiday and describe its traditions.',
+    });
+
+    c.header('Content-Type', 'text/plain; charset=utf-8');
+
+    await stream.pipe(result.toTextStream());
+  }),
+);
+
+serve({ fetch: app.fetch, port: 8080 });
+```
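Since the `X-Vercel-AI-Data-Stream: v1` header marks the body as a data stream, the Hono data-stream endpoint above can also back the SDK's chat hook. A minimal sketch, assuming a React app and the `ai/react` entry point of this SDK version (the component is not part of the commit):

```tsx
'use client';

import { useChat } from 'ai/react';

export default function Chat() {
  // Point the hook at the Hono data-stream endpoint above.
  const { messages, input, handleInputChange, handleSubmit } = useChat({
    api: 'http://localhost:8080',
  });

  return (
    <form onSubmit={handleSubmit}>
      {messages.map(message => (
        <div key={message.id}>
          {message.role}: {message.content}
        </div>
      ))}
      <input value={input} onChange={handleInputChange} />
    </form>
  );
}
```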

content/examples/15-api-servers/index.mdx (+5)

@@ -14,5 +14,10 @@ You can use the Vercel AI SDK in any JavaScript API server to generate text and
       description: 'Stream text to a Node.js HTTP server.',
       href: '/examples/api-servers/node-js-http-server',
     },
+    {
+      title: 'Hono',
+      description: 'Stream text to a Hono server.',
+      href: '/examples/api-servers/hono',
+    },
   ]}
/>

examples/hono/.env.example (new file, +7)

@@ -0,0 +1,7 @@
+ANTHROPIC_API_KEY=""
+OPENAI_API_KEY=""
+MISTRAL_API_KEY=""
+GOOGLE_GENERATIVE_AI_API_KEY=""
+FIREWORKS_API_KEY=""
+GROQ_API_KEY=""
+PERPLEXITY_API_KEY=""

examples/hono/README.md (new file, +28)

@@ -0,0 +1,28 @@
+# Hono + Vercel AI SDK Example
+
+## Usage
+
+1. Create .env file with the following content (and more settings, depending on the providers you want to use):
+
+```sh
+OPENAI_API_KEY="YOUR_OPENAI_API_KEY"
+```
+
+2. Run the following commands from the root directory of the AI SDK repo:
+
+```sh
+pnpm install
+pnpm build
+```
+
+3. Run the following command:
+
+```sh
+pnpm tsx src/server.ts
+```
+
+4. Test the endpoint with Curl:
+
+```sh
+curl -X POST http://localhost:8080
+```

examples/hono/package.json (new file, +20)

@@ -0,0 +1,20 @@
+{
+  "name": "ai-sdk-hono-example",
+  "version": "0.0.0",
+  "private": true,
+  "dependencies": {
+    "@ai-sdk/openai": "latest",
+    "@hono/node-server": "1.12.2",
+    "ai": "latest",
+    "dotenv": "16.4.5",
+    "hono": "4.5.11"
+  },
+  "scripts": {
+    "type-check": "tsc --noEmit"
+  },
+  "devDependencies": {
+    "@types/node": "20.11.20",
+    "tsx": "4.7.1",
+    "typescript": "5.5.4"
+  }
+}

examples/hono/src/server.ts (new file, +33)

@@ -0,0 +1,33 @@
+import { openai } from '@ai-sdk/openai';
+import { serve } from '@hono/node-server';
+import { StreamData, streamText } from 'ai';
+import 'dotenv/config';
+import { Hono } from 'hono';
+import { stream } from 'hono/streaming';
+
+const app = new Hono();
+
+app.post('/', async c =>
+  stream(c, async stream => {
+    // use stream data (optional):
+    const data = new StreamData();
+    data.append('initialized call');
+
+    const result = await streamText({
+      model: openai('gpt-4o'),
+      prompt: 'Invent a new holiday and describe its traditions.',
+      onFinish() {
+        data.append('call completed');
+        data.close();
+      },
+    });
+
+    // Mark the response as a v1 data stream:
+    c.header('X-Vercel-AI-Data-Stream', 'v1');
+    c.header('Content-Type', 'text/plain; charset=utf-8');
+
+    await stream.pipe(result.toDataStream({ data }));
+  }),
+);
+
+serve({ fetch: app.fetch, port: 8080 });

examples/hono/tsconfig.json (new file, +18)

@@ -0,0 +1,18 @@
+{
+  "compilerOptions": {
+    "strict": true,
+    "declaration": true,
+    "sourceMap": true,
+    "target": "es2022",
+    "lib": ["es2022", "dom"],
+    "module": "esnext",
+    "types": ["node"],
+    "esModuleInterop": true,
+    "allowSyntheticDefaultImports": true,
+    "moduleResolution": "node",
+    "rootDir": "./src",
+    "outDir": "./build",
+    "skipLibCheck": true
+  },
+  "include": ["src/**/*.ts"]
+}

examples/node-http-server/README.md (+1 -2)

@@ -6,7 +6,6 @@
 
 ```sh
 OPENAI_API_KEY="YOUR_OPENAI_API_KEY"
-...
 ```
 
 2. Run the following commands from the root directory of the AI SDK repo:
@@ -16,7 +15,7 @@ pnpm install
 pnpm build
 ```
 
-3. Run any example (from the `examples/ai-core` directory) with the following command:
+3. Run the following command:
 
 ```sh
 pnpm tsx src/server.ts

packages/ai/core/generate-text/stream-text-result.ts (+14 -1)

@@ -109,12 +109,25 @@ Additional response information.
 
 @returns A data stream.
 
-@deprecated Use `toDataStreamResponse` instead.
+@deprecated Use `toDataStream` instead.
   */
  toAIStream(
    callbacks?: AIStreamCallbacksAndOptions,
  ): ReadableStream<Uint8Array>;
 
+  /**
+Converts the result to a data stream.
+
+@param data an optional StreamData object that will be merged into the stream.
+@param getErrorMessage an optional function that converts an error to an error message.
+
+@return A data stream.
+  */
+  toDataStream(options?: {
+    data?: StreamData;
+    getErrorMessage?: (error: unknown) => string;
+  }): ReadableStream<Uint8Array>;
+
  /**
 Writes stream data output to a Node.js response-like object.
 It sets a `Content-Type` header to `text/plain; charset=utf-8` and

packages/ai/core/generate-text/stream-text.test.ts (+131)

@@ -1490,6 +1490,137 @@ describe('result.pipeTextStreamToResponse', async () => {
   });
 });
 
+describe('result.toDataStream', () => {
+  it('should create a data stream', async () => {
+    const result = await streamText({
+      model: new MockLanguageModelV1({
+        doStream: async () => ({
+          stream: convertArrayToReadableStream([
+            { type: 'text-delta', textDelta: 'Hello' },
+            { type: 'text-delta', textDelta: ', ' },
+            { type: 'text-delta', textDelta: 'world!' },
+            {
+              type: 'finish',
+              finishReason: 'stop',
+              usage: { promptTokens: 3, completionTokens: 10 },
+            },
+          ]),
+          rawCall: { rawPrompt: 'prompt', rawSettings: {} },
+        }),
+      }),
+      prompt: 'test-input',
+    });
+
+    const dataStream = result.toDataStream();
+
+    expect(
+      await convertReadableStreamToArray(
+        dataStream.pipeThrough(new TextDecoderStream()),
+      ),
+    ).toEqual([
+      '0:"Hello"\n',
+      '0:", "\n',
+      '0:"world!"\n',
+      'e:{"finishReason":"stop","usage":{"promptTokens":3,"completionTokens":10}}\n',
+      'd:{"finishReason":"stop","usage":{"promptTokens":3,"completionTokens":10}}\n',
+    ]);
+  });
+
+  it('should support merging with existing stream data', async () => {
+    const result = await streamText({
+      model: new MockLanguageModelV1({
+        doStream: async () => ({
+          stream: convertArrayToReadableStream([
+            { type: 'text-delta', textDelta: 'Hello' },
+            { type: 'text-delta', textDelta: ', ' },
+            { type: 'text-delta', textDelta: 'world!' },
+            {
+              type: 'finish',
+              finishReason: 'stop',
+              usage: { promptTokens: 3, completionTokens: 10 },
+            },
+          ]),
+          rawCall: { rawPrompt: 'prompt', rawSettings: {} },
+        }),
+      }),
+      prompt: 'test-input',
+    });
+
+    const streamData = new StreamData();
+    streamData.append('stream-data-value');
+    streamData.close();
+
+    const dataStream = result.toDataStream({ data: streamData });
+
+    expect(
+      await convertReadableStreamToArray(
+        dataStream.pipeThrough(new TextDecoderStream()),
+      ),
+    ).toEqual([
+      '2:["stream-data-value"]\n',
+      '0:"Hello"\n',
+      '0:", "\n',
+      '0:"world!"\n',
+      'e:{"finishReason":"stop","usage":{"promptTokens":3,"completionTokens":10}}\n',
+      'd:{"finishReason":"stop","usage":{"promptTokens":3,"completionTokens":10}}\n',
+    ]);
+  });
+
+  it('should mask error messages by default', async () => {
+    const result = await streamText({
+      model: new MockLanguageModelV1({
+        doStream: async () => ({
+          stream: convertArrayToReadableStream([
+            { type: 'error', error: 'error' },
+          ]),
+          rawCall: { rawPrompt: 'prompt', rawSettings: {} },
+        }),
+      }),
+      prompt: 'test-input',
+    });
+
+    const dataStream = result.toDataStream();
+
+    expect(
+      await convertReadableStreamToArray(
+        dataStream.pipeThrough(new TextDecoderStream()),
+      ),
+    ).toEqual([
+      '3:""\n',
+      'e:{"finishReason":"error","usage":{"promptTokens":0,"completionTokens":0}}\n',
+      'd:{"finishReason":"error","usage":{"promptTokens":0,"completionTokens":0}}\n',
+    ]);
+  });
+
+  it('should support custom error messages', async () => {
+    const result = await streamText({
+      model: new MockLanguageModelV1({
+        doStream: async () => ({
+          stream: convertArrayToReadableStream([
+            { type: 'error', error: 'error' },
+          ]),
+          rawCall: { rawPrompt: 'prompt', rawSettings: {} },
+        }),
+      }),
+      prompt: 'test-input',
+    });
+
+    const dataStream = result.toDataStream({
+      getErrorMessage: error => `custom error message: ${error}`,
+    });
+
+    expect(
+      await convertReadableStreamToArray(
+        dataStream.pipeThrough(new TextDecoderStream()),
+      ),
+    ).toEqual([
+      '3:"custom error message: error"\n',
+      'e:{"finishReason":"error","usage":{"promptTokens":0,"completionTokens":0}}\n',
+      'd:{"finishReason":"error","usage":{"promptTokens":0,"completionTokens":0}}\n',
+    ]);
+  });
+});
+
 describe('result.toDataStreamResponse', () => {
   it('should create a Response with a data stream', async () => {
     const result = await streamText({
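The expected arrays in these tests spell out the data stream wire format: each part is one line of the form `TYPE:JSON\n`, where `0:` carries text deltas, `2:` carries `StreamData` values, `3:` carries the (possibly masked) error message, and `e:`/`d:` carry finish metadata (the tests emit both with the same payload). A small parser sketch under that reading; it is illustrative only and not the SDK's own parser:

```ts
type DataStreamPart =
  | { type: 'text'; value: string }
  | { type: 'data'; value: unknown[] }
  | { type: 'error'; value: string }
  | { type: 'finish'; value: { finishReason: string; usage: unknown } };

// Parse one line of the v1 data stream protocol, e.g. '0:"Hello"' or
// 'd:{"finishReason":"stop",...}'. A trailing newline is tolerated by
// JSON.parse.
function parseDataStreamLine(line: string): DataStreamPart {
  const separatorIndex = line.indexOf(':');
  const code = line.slice(0, separatorIndex);
  const payload = JSON.parse(line.slice(separatorIndex + 1));

  switch (code) {
    case '0':
      return { type: 'text', value: payload };
    case '2':
      return { type: 'data', value: payload };
    case '3':
      return { type: 'error', value: payload };
    case 'e':
    case 'd':
      return { type: 'finish', value: payload };
    default:
      throw new Error(`unknown stream part code: ${code}`);
  }
}

// parseDataStreamLine('0:"Hello"') -> { type: 'text', value: 'Hello' }
```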

packages/ai/core/generate-text/stream-text.ts (+15 -10)

@@ -905,10 +905,10 @@ However, the LLM results are expected to be small enough to not cause issues.
   }
 
   toAIStream(callbacks: AIStreamCallbacksAndOptions = {}) {
-    return this.toDataStream({ callbacks });
+    return this.toDataStreamInternal({ callbacks });
   }
 
-  private toDataStream({
+  private toDataStreamInternal({
     callbacks = {},
     getErrorMessage = () => '', // mask error messages for safety by default
   }: {
@@ -1080,9 +1080,7 @@ However, the LLM results are expected to be small enough to not cause issues.
         contentType: 'text/plain; charset=utf-8',
         dataStreamVersion: 'v1',
       }),
-      stream: data
-        ? mergeStreams(data.stream, this.toDataStream({ getErrorMessage }))
-        : this.toDataStream({ getErrorMessage }),
+      stream: this.toDataStream({ data, getErrorMessage }),
     });
   }
 
@@ -1104,6 +1102,17 @@ However, the LLM results are expected to be small enough to not cause issues.
     return this.toDataStreamResponse(options);
   }
 
+  toDataStream(options?: {
+    data?: StreamData;
+    getErrorMessage?: (error: unknown) => string;
+  }) {
+    const stream = this.toDataStreamInternal({
+      getErrorMessage: options?.getErrorMessage,
+    });
+
+    return options?.data ? mergeStreams(options?.data.stream, stream) : stream;
+  }
+
   toDataStreamResponse(
     options?:
       | ResponseInit
@@ -1139,11 +1148,7 @@ However, the LLM results are expected to be small enough to not cause issues.
       ? options.getErrorMessage
       : undefined;
 
-    const stream = data
-      ? mergeStreams(data.stream, this.toDataStream({ getErrorMessage }))
-      : this.toDataStream({ getErrorMessage });
-
-    return new Response(stream, {
+    return new Response(this.toDataStream({ data, getErrorMessage }), {
       status: init?.status ?? 200,
       statusText: init?.statusText,
      headers: prepareResponseHeaders(init, {

pnpm-lock.yaml (+42)

Some generated files are not rendered by default.
