}
- {m.content}
+```tsx filename="app/page.tsx"
+messages.map(message => (
+
));
```
diff --git a/content/docs/04-ai-sdk-ui/03-chatbot-tool-usage.mdx b/content/docs/04-ai-sdk-ui/03-chatbot-tool-usage.mdx
index 6706763bbfa0..e42e19efb53e 100644
--- a/content/docs/04-ai-sdk-ui/03-chatbot-tool-usage.mdx
+++ b/content/docs/04-ai-sdk-ui/03-chatbot-tool-usage.mdx
@@ -22,12 +22,12 @@ The flow is as follows:
1. Client-side tools that should be automatically executed are handled with the `onToolCall` callback.
You can return the tool result from the callback.
1. Client-side tools that require user interaction can be displayed in the UI.
- The tool calls and results are available in the `toolInvocations` property of the last assistant message.
+ The tool calls and results are available as tool invocation parts in the `parts` property of the last assistant message.
1. When the user interaction is done, `addToolResult` can be used to add the tool result to the chat.
1. When there are tool calls in the last assistant message and all tool results are available, the client sends the updated messages back to the server.
This triggers another iteration of this flow.
-The tool call and tool executions are integrated into the assistant message as `toolInvocations`.
+The tool call and tool executions are integrated into the assistant message as tool invocation parts.
A tool invocation is at first a tool call, and then it becomes a tool result when the tool is executed.
The tool result contains all information about the tool call as well as the result of the tool execution.
@@ -61,7 +61,7 @@ export async function POST(req: Request) {
const { messages } = await req.json();
const result = streamText({
- model: openai('gpt-4-turbo'),
+ model: openai('gpt-4o'),
messages,
tools: {
// server-side tool with execute function:
@@ -98,7 +98,8 @@ export async function POST(req: Request) {
### Client-side page
The client-side page uses the `useChat` hook to create a chatbot application with real-time message streaming.
-Tool invocations are displayed in the chat UI.
+Tool invocations are displayed in the chat UI as tool invocation parts.
+Please make sure to render the messages using the `parts` property of the message.
There are three things worth mentioning:
@@ -117,7 +118,7 @@ There are three things worth mentioning:
'use client';
import { ToolInvocation } from 'ai';
-import { Message, useChat } from 'ai/react';
+import { useChat } from 'ai/react';
export default function Chat() {
const { messages, input, handleInputChange, handleSubmit, addToolResult } =
@@ -140,43 +141,110 @@ export default function Chat() {
return (
<>
- {messages?.map((m: Message) => (
-
-
{m.role}:
- {m.content}
- {m.toolInvocations?.map((toolInvocation: ToolInvocation) => {
- const toolCallId = toolInvocation.toolCallId;
- const addResult = (result: string) =>
- addToolResult({ toolCallId, result });
-
- // render confirmation tool (client-side tool with user interaction)
- if (toolInvocation.toolName === 'askForConfirmation') {
- return (
-
- {toolInvocation.args.message}
-
- {'result' in toolInvocation ? (
- {toolInvocation.result}
- ) : (
- <>
- addResult('Yes')}>Yes
- addResult('No')}>No
- >
- )}
-
-
- );
+ {messages?.map(message => (
+
+
{`${message.role}: `}
+ {message.parts.map(part => {
+ switch (part.type) {
+ // render text parts as simple text:
+ case 'text':
+ return part.text;
+
+ // for tool invocations, distinguish between the tools and the state:
+ case 'tool-invocation': {
+ const callId = part.toolInvocation.toolCallId;
+
+ switch (part.toolInvocation.toolName) {
+ case 'askForConfirmation': {
+ switch (part.toolInvocation.state) {
+ case 'call':
+ return (
+
+ {part.toolInvocation.args.message}
+
+
+ addToolResult({
+ toolCallId: callId,
+ result: 'Yes, confirmed.',
+ })
+ }
+ >
+ Yes
+
+
+ addToolResult({
+ toolCallId: callId,
+ result: 'No, denied',
+ })
+ }
+ >
+ No
+
+
+
+ );
+ case 'result':
+ return (
+
+ Location access allowed:{' '}
+ {part.toolInvocation.result}
+
+ );
+ }
+ break;
+ }
+
+ case 'getLocation': {
+ switch (part.toolInvocation.state) {
+ case 'call':
+ return (
+
+ Getting location...
+
+ );
+ case 'result':
+ return (
+
+ Location: {part.toolInvocation.result}
+
+ );
+ }
+ break;
+ }
+
+ case 'getWeatherInformation': {
+ switch (part.toolInvocation.state) {
+ // example of pre-rendering streaming tool calls:
+ case 'partial-call':
+ return (
+
+ {JSON.stringify(part.toolInvocation, null, 2)}
+
+ );
+ case 'call':
+ return (
+
+ Getting weather information for{' '}
+ {part.toolInvocation.args.city}...
+
+ );
+ case 'result':
+ return (
+
+ Weather in {part.toolInvocation.args.city}:{' '}
+ {part.toolInvocation.result}
+
+ );
+ }
+ break;
+ }
+ }
+ }
}
-
- // other tools:
- return 'result' in toolInvocation ? (
-
- Tool call {`${toolInvocation.toolName}: `}
- {toolInvocation.result}
-
- ) : (
-
Calling {toolInvocation.toolName}...
- );
})}
@@ -210,7 +278,7 @@ export async function POST(req: Request) {
When the flag is enabled, partial tool calls will be streamed as part of the data stream.
They are available through the `useChat` hook.
-The `toolInvocations` property of assistant messages will also contain partial tool calls.
+The tool invocation parts of assistant messages will also contain partial tool calls.
You can use the `state` property of the tool invocation to render the correct UI.
```tsx filename='app/page.tsx' highlight="9,10"
@@ -218,16 +286,18 @@ export default function Chat() {
// ...
return (
<>
- {messages?.map((m: Message) => (
-
- {m.toolInvocations?.map((toolInvocation: ToolInvocation) => {
- switch (toolInvocation.state) {
- case 'partial-call':
- return <>render partial tool call>;
- case 'call':
- return <>render full tool call>;
- case 'result':
- return <>render tool result>;
+ {messages?.map(message => (
+
+ {message.parts.map(part => {
+ if (part.type === 'tool-invocation') {
+ switch (part.toolInvocation.state) {
+ case 'partial-call':
+ return <>render partial tool call>;
+ case 'call':
+ return <>render full tool call>;
+ case 'result':
+ return <>render tool result>;
+ }
}
})}
@@ -251,7 +321,7 @@ export async function POST(req: Request) {
const { messages } = await req.json();
const result = streamText({
- model: openai('gpt-4-turbo'),
+ model: openai('gpt-4o'),
messages,
tools: {
getWeatherInformation: {
diff --git a/content/docs/07-reference/02-ai-sdk-ui/01-use-chat.mdx b/content/docs/07-reference/02-ai-sdk-ui/01-use-chat.mdx
index df8166141dc1..2aba6cdd7e46 100644
--- a/content/docs/07-reference/02-ai-sdk-ui/01-use-chat.mdx
+++ b/content/docs/07-reference/02-ai-sdk-ui/01-use-chat.mdx
@@ -211,11 +211,11 @@ Allows you to easily create a conversational user interface for your chatbot app
content={[
{
name: 'messages',
- type: 'Message[]',
+ type: 'UIMessage[]',
description: 'The current array of chat messages.',
properties: [
{
- type: 'Message',
+ type: 'UIMessage',
parameters: [
{
name: 'id',
@@ -227,17 +227,6 @@ Allows you to easily create a conversational user interface for your chatbot app
type: "'system' | 'user' | 'assistant' | 'data'",
description: 'The role of the message.',
},
- {
- name: 'content',
- type: 'string',
- description: 'The content of the message.',
- },
- {
- name: 'reasoning',
- type: 'string',
- isOptional: true,
- description: 'The reasoning of the message.',
- },
{
name: 'createdAt',
type: 'Date',
@@ -245,16 +234,9 @@ Allows you to easily create a conversational user interface for your chatbot app
description: 'The creation date of the message.',
},
{
- name: 'name',
+ name: 'content',
type: 'string',
- isOptional: true,
- description: 'The name of the message.',
- },
- {
- name: 'data',
- type: 'JSONValue',
- isOptional: true,
- description: 'Additional data sent along with the message.',
+ description: 'The content of the message.',
},
{
name: 'annotations',
@@ -264,98 +246,146 @@ Allows you to easily create a conversational user interface for your chatbot app
'Additional annotations sent along with the message.',
},
{
- name: 'toolInvocations',
- type: 'Array
',
- isOptional: true,
+ name: 'parts',
+ type: 'Array',
description:
- 'An array of tool invocations that are associated with the (assistant) message.',
+ 'An array of message parts that are associated with the message.',
properties: [
{
- type: 'ToolInvocation',
+ type: 'TextUIPart',
+ description: 'A text part of the message.',
parameters: [
{
- name: 'state',
- type: "'partial-call'",
- description:
- 'The state of the tool call when it was partially created.',
- },
- {
- name: 'toolCallId',
- type: 'string',
- description:
- 'ID of the tool call. This ID is used to match the tool call with the tool result.',
+ name: 'type',
+ type: '"text"',
},
{
- name: 'toolName',
+ name: 'text',
type: 'string',
- description: 'Name of the tool that is being called.',
- },
- {
- name: 'args',
- type: 'any',
- description:
- 'Partial arguments of the tool call. This is a JSON-serializable object.',
+ description: 'The text content of the part.',
},
],
},
{
- type: 'ToolInvocation',
+ type: 'ReasoningUIPart',
+ description: 'A reasoning part of the message.',
parameters: [
{
- name: 'state',
- type: "'call'",
- description:
- 'The state of the tool call when it was fully created.',
+ name: 'type',
+ type: '"reasoning"',
},
{
- name: 'toolCallId',
+ name: 'reasoning',
type: 'string',
- description:
- 'ID of the tool call. This ID is used to match the tool call with the tool result.',
- },
- {
- name: 'toolName',
- type: 'string',
- description: 'Name of the tool that is being called.',
- },
- {
- name: 'args',
- type: 'any',
- description:
- 'Arguments of the tool call. This is a JSON-serializable object that matches the tools input schema.',
+ description: 'The reasoning content of the part.',
},
],
},
{
- type: 'ToolInvocation',
+ type: 'ToolInvocationUIPart',
+ description: 'A tool invocation part of the message.',
parameters: [
{
- name: 'state',
- type: "'result'",
- description:
- 'The state of the tool call when the result is available.',
- },
- {
- name: 'toolCallId',
- type: 'string',
- description:
- 'ID of the tool call. This ID is used to match the tool call with the tool result.',
- },
- {
- name: 'toolName',
- type: 'string',
- description: 'Name of the tool that is being called.',
- },
- {
- name: 'args',
- type: 'any',
- description:
- 'Arguments of the tool call. This is a JSON-serializable object that matches the tools input schema.',
+ name: 'type',
+ type: '"tool-invocation"',
},
{
- name: 'result',
- type: 'any',
- description: 'The result of the tool call.',
+ name: 'toolInvocation',
+ type: 'ToolInvocation',
+ properties: [
+ {
+ type: 'ToolInvocation',
+ parameters: [
+ {
+ name: 'state',
+ type: "'partial-call'",
+ description:
+ 'The state of the tool call when it was partially created.',
+ },
+ {
+ name: 'toolCallId',
+ type: 'string',
+ description:
+ 'ID of the tool call. This ID is used to match the tool call with the tool result.',
+ },
+ {
+ name: 'toolName',
+ type: 'string',
+ description:
+ 'Name of the tool that is being called.',
+ },
+ {
+ name: 'args',
+ type: 'any',
+ description:
+ 'Partial arguments of the tool call. This is a JSON-serializable object.',
+ },
+ ],
+ },
+ {
+ type: 'ToolInvocation',
+ parameters: [
+ {
+ name: 'state',
+ type: "'call'",
+ description:
+ 'The state of the tool call when it was fully created.',
+ },
+ {
+ name: 'toolCallId',
+ type: 'string',
+ description:
+ 'ID of the tool call. This ID is used to match the tool call with the tool result.',
+ },
+ {
+ name: 'toolName',
+ type: 'string',
+ description:
+ 'Name of the tool that is being called.',
+ },
+ {
+ name: 'args',
+ type: 'any',
+ description:
+ 'Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema.',
+ },
+ ],
+ },
+ {
+ type: 'ToolInvocation',
+ parameters: [
+ {
+ name: 'state',
+ type: "'result'",
+ description:
+ 'The state of the tool call when the result is available.',
+ },
+ {
+ name: 'toolCallId',
+ type: 'string',
+ description:
+ 'ID of the tool call. This ID is used to match the tool call with the tool result.',
+ },
+ {
+ name: 'toolName',
+ type: 'string',
+ description:
+ 'Name of the tool that is being called.',
+ },
+ {
+ name: 'args',
+ type: 'any',
+ description:
+ 'Arguments of the tool call. This is a JSON-serializable object that matches the tool's input schema.',
+ },
+ {
+ name: 'result',
+ type: 'any',
+ description: 'The result of the tool call.',
+ },
+ ],
+ },
+ ],
},
],
},
diff --git a/examples/next-openai/app/api/use-chat-tools/route.ts b/examples/next-openai/app/api/use-chat-tools/route.ts
index 51f5f40e933e..086d9d9d6fbc 100644
--- a/examples/next-openai/app/api/use-chat-tools/route.ts
+++ b/examples/next-openai/app/api/use-chat-tools/route.ts
@@ -1,3 +1,4 @@
+import { anthropic } from '@ai-sdk/anthropic';
import { openai } from '@ai-sdk/openai';
import { streamText, tool } from 'ai';
import { z } from 'zod';
@@ -10,6 +11,7 @@ export async function POST(req: Request) {
const result = streamText({
model: openai('gpt-4o'),
+ // model: anthropic('claude-3-5-sonnet-latest'),
messages,
toolCallStreaming: true,
maxSteps: 5, // multi-steps for server-side tools
diff --git a/examples/next-openai/app/use-chat-persistence-single-message-tools/[id]/chat.tsx b/examples/next-openai/app/use-chat-persistence-single-message-tools/[id]/chat.tsx
index 0ce45d642879..0078f3233915 100644
--- a/examples/next-openai/app/use-chat-persistence-single-message-tools/[id]/chat.tsx
+++ b/examples/next-openai/app/use-chat-persistence-single-message-tools/[id]/chat.tsx
@@ -1,6 +1,6 @@
'use client';
-import { createIdGenerator, ToolInvocation } from 'ai';
+import { createIdGenerator } from 'ai';
import { Message, useChat } from 'ai/react';
export default function Chat({
@@ -37,79 +37,105 @@ export default function Chat({
return (
- {messages?.map((m: Message) => (
-
-
{`${m.role}: `}
- {m.toolInvocations?.map((toolInvocation: ToolInvocation) => {
- const toolCallId = toolInvocation.toolCallId;
+ {messages?.map(message => (
+
+
{`${message.role}: `}
+ {message.parts.map(part => {
+ switch (part.type) {
+ case 'text':
+ return part.text;
+ case 'tool-invocation': {
+ const callId = part.toolInvocation.toolCallId;
- // example of pre-rendering streaming tool calls
- if (toolInvocation.state === 'partial-call') {
- return (
-
- {JSON.stringify(toolInvocation, null, 2)}
-
- );
- }
+ switch (part.toolInvocation.toolName) {
+ case 'askForConfirmation': {
+ switch (part.toolInvocation.state) {
+ case 'call':
+ return (
+
+ {part.toolInvocation.args.message}
+
+
+ addToolResult({
+ toolCallId: callId,
+ result: 'Yes, confirmed.',
+ })
+ }
+ >
+ Yes
+
+
+ addToolResult({
+ toolCallId: callId,
+ result: 'No, denied',
+ })
+ }
+ >
+ No
+
+
+
+ );
+ case 'result':
+ return (
+
+ Location access allowed:{' '}
+ {part.toolInvocation.result}
+
+ );
+ }
+ }
- // render confirmation tool (client-side tool with user interaction)
- if (toolInvocation.toolName === 'askForConfirmation') {
- return (
-
- {toolInvocation.args.message}
-
- {'result' in toolInvocation ? (
- {toolInvocation.result}
- ) : (
- <>
-
- addToolResult({
- toolCallId,
- result: 'Yes, confirmed.',
- })
- }
- >
- Yes
-
-
- addToolResult({
- toolCallId,
- result: 'No, denied',
- })
- }
- >
- No
-
- >
- )}
-
-
- );
- }
+ case 'getLocation': {
+ switch (part.toolInvocation.state) {
+ case 'call':
+ return (
+
+ Getting location...
+
+ );
+ case 'result':
+ return (
+
+ Location: {part.toolInvocation.result}
+
+ );
+ }
+ }
- // other tools:
- return 'result' in toolInvocation ? (
-
- Tool call {`${toolInvocation.toolName}: `}
- {toolInvocation.result}
-
- ) : (
-
- Calling {toolInvocation.toolName}...
-
- );
- })}{' '}
- {m.annotations && (
-
- {JSON.stringify(m.annotations, null, 2)}
-
- )}
- {m.content}
-
+ case 'getWeatherInformation': {
+ switch (part.toolInvocation.state) {
+ // example of pre-rendering streaming tool calls:
+ case 'partial-call':
+ return (
+
+ {JSON.stringify(part.toolInvocation, null, 2)}
+
+ );
+ case 'call':
+ return (
+
+ Getting weather information for{' '}
+ {part.toolInvocation.args.city}...
+
+ );
+ case 'result':
+ return (
+
+ Weather in {part.toolInvocation.args.city}:{' '}
+ {part.toolInvocation.result}
+
+ );
+ }
+ }
+ }
+ }
+ }
+ })}
))}
diff --git a/examples/next-openai/app/use-chat-reasoning/page.tsx b/examples/next-openai/app/use-chat-reasoning/page.tsx
index d40a6a174c8a..0f6dddbc3594 100644
--- a/examples/next-openai/app/use-chat-reasoning/page.tsx
+++ b/examples/next-openai/app/use-chat-reasoning/page.tsx
@@ -18,15 +18,25 @@ export default function Chat() {
return (
- {messages.map(m => (
-
- {m.role === 'user' ? 'User: ' : 'AI: '}
- {m.reasoning && (
-
- {m.reasoning}
-
- )}
- {m.content}
+ {messages.map(message => (
+
+ {message.role === 'user' ? 'User: ' : 'AI: '}
+ {message.parts.map((part, index) => {
+ if (part.type === 'text') {
+ return
{part.text}
;
+ }
+
+ if (part.type === 'reasoning') {
+ return (
+
+ {part.reasoning}
+
+ );
+ }
+ })}
))}
diff --git a/examples/next-openai/app/use-chat-tools/page.tsx b/examples/next-openai/app/use-chat-tools/page.tsx
index 4df23edffe37..4c1f12cb646e 100644
--- a/examples/next-openai/app/use-chat-tools/page.tsx
+++ b/examples/next-openai/app/use-chat-tools/page.tsx
@@ -1,7 +1,6 @@
'use client';
-import { ToolInvocation } from 'ai';
-import { Message, useChat } from 'ai/react';
+import { useChat } from 'ai/react';
export default function Chat() {
const { messages, input, handleInputChange, handleSubmit, addToolResult } =
@@ -25,74 +24,108 @@ export default function Chat() {
return (
- {messages?.map((m: Message) => (
-
-
{`${m.role}: `}
- {m.toolInvocations?.map((toolInvocation: ToolInvocation) => {
- const toolCallId = toolInvocation.toolCallId;
+ {messages?.map(message => (
+
+
{`${message.role}: `}
+ {message.parts.map(part => {
+ switch (part.type) {
+ case 'text':
+ return part.text;
+ case 'tool-invocation': {
+ const callId = part.toolInvocation.toolCallId;
- // example of pre-rendering streaming tool calls
- if (toolInvocation.state === 'partial-call') {
- return (
-
- {JSON.stringify(toolInvocation, null, 2)}
-
- );
- }
+ switch (part.toolInvocation.toolName) {
+ case 'askForConfirmation': {
+ switch (part.toolInvocation.state) {
+ case 'call':
+ return (
+
+ {part.toolInvocation.args.message}
+
+
+ addToolResult({
+ toolCallId: callId,
+ result: 'Yes, confirmed.',
+ })
+ }
+ >
+ Yes
+
+
+ addToolResult({
+ toolCallId: callId,
+ result: 'No, denied',
+ })
+ }
+ >
+ No
+
+
+
+ );
+ case 'result':
+ return (
+
+ Location access allowed:{' '}
+ {part.toolInvocation.result}
+
+ );
+ }
+ break;
+ }
- // render confirmation tool (client-side tool with user interaction)
- if (toolInvocation.toolName === 'askForConfirmation') {
- return (
-
- {toolInvocation.args.message}
-
- {'result' in toolInvocation ? (
- {toolInvocation.result}
- ) : (
- <>
-
- addToolResult({
- toolCallId,
- result: 'Yes, confirmed.',
- })
- }
- >
- Yes
-
-
- addToolResult({
- toolCallId,
- result: 'No, denied',
- })
- }
- >
- No
-
- >
- )}
-
-
- );
- }
+ case 'getLocation': {
+ switch (part.toolInvocation.state) {
+ case 'call':
+ return (
+
+ Getting location...
+
+ );
+ case 'result':
+ return (
+
+ Location: {part.toolInvocation.result}
+
+ );
+ }
+ break;
+ }
- // other tools:
- return 'result' in toolInvocation ? (
-
- Tool call {`${toolInvocation.toolName}: `}
- {toolInvocation.result}
-
- ) : (
-
- Calling {toolInvocation.toolName}...
-
- );
+ case 'getWeatherInformation': {
+ switch (part.toolInvocation.state) {
+ // example of pre-rendering streaming tool calls:
+ case 'partial-call':
+ return (
+
+ {JSON.stringify(part.toolInvocation, null, 2)}
+
+ );
+ case 'call':
+ return (
+
+ Getting weather information for{' '}
+ {part.toolInvocation.args.city}...
+
+ );
+ case 'result':
+ return (
+
+ Weather in {part.toolInvocation.args.city}:{' '}
+ {part.toolInvocation.result}
+
+ );
+ }
+ break;
+ }
+ }
+ }
+ }
})}
- {m.content}
-
))}
diff --git a/examples/next-openai/package.json b/examples/next-openai/package.json
index 2d07f6dcf469..0182b0c28e00 100644
--- a/examples/next-openai/package.json
+++ b/examples/next-openai/package.json
@@ -9,6 +9,7 @@
"lint": "next lint"
},
"dependencies": {
+ "@ai-sdk/anthropic": "1.1.6",
"@ai-sdk/deepseek": "0.1.8",
"@ai-sdk/openai": "1.1.9",
"@ai-sdk/ui-utils": "1.1.9",
diff --git a/examples/nuxt-openai/pages/use-chat-tools/index.vue b/examples/nuxt-openai/pages/use-chat-tools/index.vue
index 8e0ee045ae65..bd6374a60e94 100644
--- a/examples/nuxt-openai/pages/use-chat-tools/index.vue
+++ b/examples/nuxt-openai/pages/use-chat-tools/index.vue
@@ -13,65 +13,116 @@ const { input, handleSubmit, messages, addToolResult } = useChat({
}
},
});
+
+const messageList = computed(() => messages.value); // computed property for type inference
-
-
{{ `${m.role}: ` }}
- {{ m.content }}
-
-
-
- {{ JSON.stringify(toolInvocation, null, 2) }}
-
+
+
{{ `${message.role}: ` }}
+
+
+ {{ part.text }}
+
+
+
+
+
+ {{ part.toolInvocation.args.message }}
+
+
+ Yes
+
+
+ No
+
+
+
+
+
+
+ Location access allowed: {{ part.toolInvocation.result }}
+
+
+
-
-
- {{ toolInvocation.args.message }}
-
-
{{ toolInvocation.result }}
-
-
+
+
- Yes
-
-
+
+
+
- No
-
+ Location: {{ part.toolInvocation.result }}
+
-
-
+
-
-
-
- Tool call {{ `${toolInvocation.toolName}: ` }}
- {{ toolInvocation.result }}
+
+
+
+ {{ JSON.stringify(part.toolInvocation, null, 2) }}
+
+
+
+
+ Getting weather information for
+ {{ part.toolInvocation.args.city }}...
+
+
+
+
+ Weather in {{ part.toolInvocation.args.city }}:
+ {{ part.toolInvocation.result }}
+
+
- Calling {{ toolInvocation.toolName }}...
-
+
+
diff --git a/examples/nuxt-openai/server/api/use-chat-tools.ts b/examples/nuxt-openai/server/api/use-chat-tools.ts
index a6e1de5cfbe9..76d27fa25f0e 100644
--- a/examples/nuxt-openai/server/api/use-chat-tools.ts
+++ b/examples/nuxt-openai/server/api/use-chat-tools.ts
@@ -11,7 +11,7 @@ export default defineLazyEventHandler(async () => {
const { messages } = await readBody(event);
const result = streamText({
- model: openai('gpt-4-turbo'),
+ model: openai('gpt-4o'),
messages,
toolCallStreaming: true,
maxSteps: 5, // multi-steps for server-side tools
diff --git a/examples/solidstart-openai/src/routes/api/use-chat-tools/index.ts b/examples/solidstart-openai/src/routes/api/use-chat-tools/index.ts
index 33545265b53c..65be7b857ec3 100644
--- a/examples/solidstart-openai/src/routes/api/use-chat-tools/index.ts
+++ b/examples/solidstart-openai/src/routes/api/use-chat-tools/index.ts
@@ -7,8 +7,10 @@ export const POST = async (event: APIEvent) => {
const { messages } = await event.request.json();
const result = streamText({
- model: openai('gpt-4o-mini'),
+ model: openai('gpt-4o'),
messages,
+ toolCallStreaming: true,
+ maxSteps: 5, // multi-steps for server-side tools
tools: {
// server-side tool with execute function:
getWeatherInformation: {
diff --git a/examples/solidstart-openai/src/routes/use-chat-tools/index.tsx b/examples/solidstart-openai/src/routes/use-chat-tools/index.tsx
index 458bfa190bb1..a63e166338dc 100644
--- a/examples/solidstart-openai/src/routes/use-chat-tools/index.tsx
+++ b/examples/solidstart-openai/src/routes/use-chat-tools/index.tsx
@@ -1,5 +1,6 @@
/* eslint-disable react/jsx-key */
import { useChat } from '@ai-sdk/solid';
+import { TextUIPart, ToolInvocationUIPart } from '@ai-sdk/ui-utils';
import { For, Show } from 'solid-js';
export default function Chat() {
@@ -24,82 +25,172 @@ export default function Chat() {
return (
- No messages
}>
+
{message => (
{`${message.role}: `}
- {message.content}
-
- {toolInvocation => (
-
- Calling {toolInvocation.toolName}...
-
- }
- >
- {toolInvocation => (
-
- Tool call {`${toolInvocation.toolName}: `}
- {toolInvocation.result}
-
- )}
-
- }
- when={
- toolInvocation.toolName === 'askForConfirmation' &&
- toolInvocation
- }
- keyed
- >
- {toolInvocation => (
-
- {toolInvocation.args.message}
-
+
+ {part => (
+ <>
+
+ {(part as TextUIPart).text}
+
+
+ {
+ <>
-
- addToolResult({
- toolCallId: toolInvocation.toolCallId,
- result: 'Yes, confirmed.',
- })
- }
- >
- Yes
-
-
- addToolResult({
- toolCallId: toolInvocation.toolCallId,
- result: 'No, denied',
- })
- }
- >
- No
-
- >
+ when={
+ (part as ToolInvocationUIPart).toolInvocation
+ .toolName === 'askForConfirmation'
}
- when={'result' in toolInvocation && toolInvocation}
- keyed
>
- {toolInvocation => {toolInvocation.result} }
+
+
+ {
+ (part as ToolInvocationUIPart).toolInvocation
+ .args.message
+ }
+
+
+ addToolResult({
+ toolCallId: (part as ToolInvocationUIPart)
+ .toolInvocation.toolCallId,
+ result: 'Yes, confirmed.',
+ })
+ }
+ >
+ Yes
+
+
+ addToolResult({
+ toolCallId: (part as ToolInvocationUIPart)
+ .toolInvocation.toolCallId,
+ result: 'No, denied',
+ })
+ }
+ >
+ No
+
+
+
+
+
+
+ Location access allowed:{' '}
+ {(part as any).toolInvocation.result}
+
+
-
-
- )}
-
+
+
+
+ Getting location...
+
+
+
+ Location: {(part as any).toolInvocation.result}
+
+
+
+
+
+
+
+ {JSON.stringify(
+ (part as ToolInvocationUIPart).toolInvocation,
+ null,
+ 2,
+ )}
+
+
+
+
+ Getting weather information for{' '}
+ {
+ (part as ToolInvocationUIPart).toolInvocation
+ .args.city
+ }
+ ...
+
+
+
+
+
+ Weather in{' '}
+ {(part as any).toolInvocation.args.city}:{' '}
+ {(part as any).toolInvocation.result}
+
+
+
+ >
+ }
+
+ >
)}
-
)}
diff --git a/examples/sveltekit-openai/src/routes/api/use-chat-tools/+server.ts b/examples/sveltekit-openai/src/routes/api/use-chat-tools/+server.ts
index 1075293a7473..0d7793fbe7c7 100644
--- a/examples/sveltekit-openai/src/routes/api/use-chat-tools/+server.ts
+++ b/examples/sveltekit-openai/src/routes/api/use-chat-tools/+server.ts
@@ -18,8 +18,10 @@ export const POST = (async ({ request }) => {
const { messages } = await request.json();
const result = streamText({
- model: openai('gpt-4-turbo'),
+ model: openai('gpt-4o'),
messages,
+ toolCallStreaming: true,
+ maxSteps: 5, // multi-steps for server-side tools
tools: {
// server-side tool with execute function:
getWeatherInformation: {
diff --git a/examples/sveltekit-openai/src/routes/use-chat-tools/+page.svelte b/examples/sveltekit-openai/src/routes/use-chat-tools/+page.svelte
index 326951c87eb3..ca0fa8db1ae9 100644
--- a/examples/sveltekit-openai/src/routes/use-chat-tools/+page.svelte
+++ b/examples/sveltekit-openai/src/routes/use-chat-tools/+page.svelte
@@ -4,53 +4,88 @@
const { input, handleSubmit, messages, addToolResult } = useChat({
api: '/api/use-chat-tools',
maxSteps: 5,
+
// run client-side tools that are automatically executed:
async onToolCall({ toolCall }) {
- if (toolCall.toolName === 'getLocation') {
- const cities = ['New York', 'Los Angeles', 'Chicago', 'San Francisco'];
- return cities[Math.floor(Math.random() * cities.length)];
- }
- }
+ if (toolCall.toolName === 'getLocation') {
+ const cities = ['New York', 'Los Angeles', 'Chicago', 'San Francisco'];
+ return cities[Math.floor(Math.random() * cities.length)];
+ }
+ },
});
-
-
-
- {#each $messages as message (message.id)}
- {message.role}: {message.content}
- {#if message.toolInvocations}
- {#each message.toolInvocations as toolInvocation (toolInvocation.toolCallId)}
- {@const toolCallId = toolInvocation.toolCallId}
+
+ {#each $messages as message (message.id)}
+
+
{message.role}
- {#if toolInvocation.toolName === 'askForConfirmation'}
-
- {toolInvocation.args.message}
-
- {#if 'result' in toolInvocation}
- {toolInvocation.result}
- {:else}
- addToolResult({ toolCallId, result: 'Yes' })}>Yes
- addToolResult({ toolCallId, result: 'No' })}>No
- {/if}
-
-
- {/if}
+ {#each message.parts as part}
+ {#if part.type === 'text'}
+ {part.text}
+ {:else if part.type === 'tool-invocation'}
+ {@const toolCallId = part.toolInvocation.toolCallId}
+ {@const toolName = part.toolInvocation.toolName}
+ {@const state = part.toolInvocation.state}
- {#if 'result' in toolInvocation}
-
- Tool call {`${toolInvocation.toolName}: `}
- {toolInvocation.result}
-
- {:else}
-
Calling {toolInvocation.toolName}...
- {/if}
- {/each}
+ {#if toolName === 'askForConfirmation'}
+ {#if state === 'call'}
+
+ {part.toolInvocation.args.message}
+
+
+ addToolResult({ toolCallId, result: 'Yes, confirmed' })}
+ >Yes
+
+ addToolResult({ toolCallId, result: 'No, denied' })}
+ >No
+
+
+ {:else if state === 'result'}
+
+ {part.toolInvocation.result}
+
+ {/if}
+ {:else if toolName === 'getLocation'}
+ {#if state === 'call'}
+
Getting location...
+ {:else if state === 'result'}
+
+ Location: {part.toolInvocation.result}
+
+ {/if}
+ {:else if toolName === 'getWeatherInformation'}
+ {#if state === 'partial-call'}
+
{JSON.stringify(part.toolInvocation, null, 2)}
+ {:else if state === 'call'}
+
+ Getting weather information for {part.toolInvocation.args
+ .city}...
+
+ {:else if state === 'result'}
+
+ Weather in {part.toolInvocation.args.city}: {part.toolInvocation
+ .result}
+
+ {/if}
{/if}
+ {/if}
{/each}
-
+
+ {/each}
+
diff --git a/packages/ai/core/prompt/__snapshots__/append-response-messages.test.ts.snap b/packages/ai/core/prompt/__snapshots__/append-response-messages.test.ts.snap
new file mode 100644
index 000000000000..4a2603afcb72
--- /dev/null
+++ b/packages/ai/core/prompt/__snapshots__/append-response-messages.test.ts.snap
@@ -0,0 +1,355 @@
+// Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html
+
+exports[`appendResponseMessages > adds assistant text response to previous assistant message 1`] = `
+[
+ {
+ "content": "User wants a tool invocation",
+ "createdAt": 1970-01-01T00:00:00.123Z,
+ "id": "1",
+ "parts": [
+ {
+ "text": "User wants a tool invocation",
+ "type": "text",
+ },
+ ],
+ "role": "user",
+ },
+ {
+ "content": "This is a response from the assistant.",
+ "createdAt": 1970-01-01T00:00:00.456Z,
+ "id": "2",
+ "parts": [
+ {
+ "toolInvocation": {
+ "args": {
+ "query": "some query",
+ },
+ "result": {
+ "answer": "Tool result data",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "call-1",
+ "toolName": "some-tool",
+ },
+ "type": "tool-invocation",
+ },
+ {
+ "text": "This is a response from the assistant.",
+ "type": "text",
+ },
+ ],
+ "role": "assistant",
+ "toolInvocations": [
+ {
+ "args": {
+ "query": "some query",
+ },
+ "result": {
+ "answer": "Tool result data",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "call-1",
+ "toolName": "some-tool",
+ },
+ ],
+ },
+]
+`;
+
+exports[`appendResponseMessages > adds assistant tool call response to previous assistant message 1`] = `
+[
+ {
+ "content": "User wants a tool invocation",
+ "createdAt": 1970-01-01T00:00:00.123Z,
+ "id": "1",
+ "parts": [
+ {
+ "text": "User wants a tool invocation",
+ "type": "text",
+ },
+ ],
+ "role": "user",
+ },
+ {
+ "content": "",
+ "createdAt": 1970-01-01T00:00:00.456Z,
+ "id": "2",
+ "parts": [
+ {
+ "toolInvocation": {
+ "args": {
+ "query": "some query",
+ },
+ "result": {
+ "answer": "Tool result data",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "call-1",
+ "toolName": "some-tool",
+ },
+ "type": "tool-invocation",
+ },
+ {
+ "toolInvocation": {
+ "args": {
+ "query": "another query",
+ },
+ "state": "call",
+ "step": 1,
+ "toolCallId": "call-2",
+ "toolName": "some-tool",
+ },
+ "type": "tool-invocation",
+ },
+ ],
+ "role": "assistant",
+ "toolInvocations": [
+ {
+ "args": {
+ "query": "some query",
+ },
+ "result": {
+ "answer": "Tool result data",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "call-1",
+ "toolName": "some-tool",
+ },
+ {
+ "args": {
+ "query": "another query",
+ },
+ "state": "call",
+ "step": 1,
+ "toolCallId": "call-2",
+ "toolName": "some-tool",
+ },
+ ],
+ },
+]
+`;
+
+exports[`appendResponseMessages > adds chain of assistant messages and tool results 1`] = `
+[
+ {
+ "content": "User wants a tool invocation",
+ "createdAt": 1970-01-01T00:00:00.123Z,
+ "id": "1",
+ "parts": [
+ {
+ "text": "User wants a tool invocation",
+ "type": "text",
+ },
+ ],
+ "role": "user",
+ },
+ {
+ "content": "response",
+ "createdAt": 1970-01-01T00:00:00.789Z,
+ "id": "2",
+ "parts": [
+ {
+ "toolInvocation": {
+ "args": {
+ "query": "some query",
+ },
+ "result": {
+ "answer": "Tool result data",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "call-1",
+ "toolName": "some-tool",
+ },
+ "type": "tool-invocation",
+ },
+ {
+ "toolInvocation": {
+ "args": {
+ "query": "another query",
+ },
+ "result": {
+ "answer": "another result",
+ },
+ "state": "result",
+ "step": 1,
+ "toolCallId": "call-2",
+ "toolName": "some-tool",
+ },
+ "type": "tool-invocation",
+ },
+ {
+ "text": "response",
+ "type": "text",
+ },
+ ],
+ "role": "assistant",
+ "toolInvocations": [
+ {
+ "args": {
+ "query": "some query",
+ },
+ "result": {
+ "answer": "Tool result data",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "call-1",
+ "toolName": "some-tool",
+ },
+ {
+ "args": {
+ "query": "another query",
+ },
+ "result": {
+ "answer": "another result",
+ },
+ "state": "result",
+ "step": 1,
+ "toolCallId": "call-2",
+ "toolName": "some-tool",
+ },
+ ],
+ },
+]
+`;
+
+exports[`appendResponseMessages > adds tool results to the previously invoked tool calls (assistant message) 1`] = `
+[
+ {
+ "content": "User wants a tool invocation",
+ "createdAt": 1970-01-01T00:00:00.123Z,
+ "id": "1",
+ "parts": [
+ {
+ "text": "User wants a tool invocation",
+ "type": "text",
+ },
+ ],
+ "role": "user",
+ },
+ {
+ "content": "Placeholder text",
+ "createdAt": 1970-01-01T00:00:00.456Z,
+ "id": "2",
+ "parts": [
+ {
+ "toolInvocation": {
+ "args": {
+ "query": "some query",
+ },
+ "result": {
+ "answer": "Tool result data",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "call-1",
+ "toolName": "some-tool",
+ },
+ "type": "tool-invocation",
+ },
+ ],
+ "role": "assistant",
+ "toolInvocations": [
+ {
+ "args": {
+ "query": "some query",
+ },
+ "result": {
+ "answer": "Tool result data",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "call-1",
+ "toolName": "some-tool",
+ },
+ ],
+ },
+]
+`;
+
+exports[`appendResponseMessages > appends assistant messages with text content 1`] = `
+[
+ {
+ "content": "Hello!",
+ "createdAt": 1970-01-01T00:00:00.123Z,
+ "id": "1",
+ "parts": [
+ {
+ "text": "Hello!",
+ "type": "text",
+ },
+ ],
+ "role": "user",
+ },
+ {
+ "content": "This is a response from the assistant.",
+ "createdAt": 1970-01-01T00:00:00.789Z,
+ "id": "123",
+ "parts": [
+ {
+ "text": "This is a response from the assistant.",
+ "type": "text",
+ },
+ ],
+ "role": "assistant",
+ "toolInvocations": [],
+ },
+]
+`;
+
+exports[`appendResponseMessages > handles tool calls and marks them as "call" initially 1`] = `
+[
+ {
+ "content": "User wants a tool invocation",
+ "createdAt": 1970-01-01T00:00:00.123Z,
+ "id": "1",
+ "parts": [
+ {
+ "text": "User wants a tool invocation",
+ "type": "text",
+ },
+ ],
+ "role": "user",
+ },
+ {
+ "content": "Processing tool call...",
+ "createdAt": 1970-01-01T00:00:00.789Z,
+ "id": "123",
+ "parts": [
+ {
+ "text": "Processing tool call...",
+ "type": "text",
+ },
+ {
+ "toolInvocation": {
+ "args": {
+ "query": "some query",
+ },
+ "state": "call",
+ "step": 0,
+ "toolCallId": "call-1",
+ "toolName": "some-tool",
+ },
+ "type": "tool-invocation",
+ },
+ ],
+ "role": "assistant",
+ "toolInvocations": [
+ {
+ "args": {
+ "query": "some query",
+ },
+ "state": "call",
+ "step": 0,
+ "toolCallId": "call-1",
+ "toolName": "some-tool",
+ },
+ ],
+ },
+]
+`;
diff --git a/packages/ai/core/prompt/__snapshots__/convert-to-core-messages.test.ts.snap b/packages/ai/core/prompt/__snapshots__/convert-to-core-messages.test.ts.snap
new file mode 100644
index 000000000000..cb1d97434ed1
--- /dev/null
+++ b/packages/ai/core/prompt/__snapshots__/convert-to-core-messages.test.ts.snap
@@ -0,0 +1,746 @@
+// Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html
+
+exports[`convertToCoreMessages > assistant message > should handle assistant message with tool invocations (parts) 1`] = `
+[
+ {
+ "content": [
+ {
+ "text": "Let me calculate that for you.",
+ "type": "text",
+ },
+ {
+ "args": {
+ "numbers": [
+ 1,
+ 2,
+ ],
+ "operation": "add",
+ },
+ "toolCallId": "call1",
+ "toolName": "calculator",
+ "type": "tool-call",
+ },
+ ],
+ "role": "assistant",
+ },
+ {
+ "content": [
+ {
+ "result": "3",
+ "toolCallId": "call1",
+ "toolName": "calculator",
+ "type": "tool-result",
+ },
+ ],
+ "role": "tool",
+ },
+]
+`;
+
+exports[`convertToCoreMessages > assistant message > should handle assistant message with tool invocations 1`] = `
+[
+ {
+ "content": [
+ {
+ "text": "Let me calculate that for you.",
+ "type": "text",
+ },
+ {
+ "args": {
+ "numbers": [
+ 1,
+ 2,
+ ],
+ "operation": "add",
+ },
+ "toolCallId": "call1",
+ "toolName": "calculator",
+ "type": "tool-call",
+ },
+ ],
+ "role": "assistant",
+ },
+ {
+ "content": [
+ {
+ "result": "3",
+ "toolCallId": "call1",
+ "toolName": "calculator",
+ "type": "tool-result",
+ },
+ ],
+ "role": "tool",
+ },
+]
+`;
+
+exports[`convertToCoreMessages > assistant message > should handle assistant message with tool invocations that have multi-part responses (parts) 1`] = `
+[
+ {
+ "content": [
+ {
+ "text": "Let me calculate that for you.",
+ "type": "text",
+ },
+ {
+ "args": {},
+ "toolCallId": "call1",
+ "toolName": "screenshot",
+ "type": "tool-call",
+ },
+ ],
+ "role": "assistant",
+ },
+ {
+ "content": [
+ {
+ "experimental_content": [
+ {
+ "data": "imgbase64",
+ "type": "image",
+ },
+ ],
+ "result": [
+ {
+ "data": "imgbase64",
+ "type": "image",
+ },
+ ],
+ "toolCallId": "call1",
+ "toolName": "screenshot",
+ "type": "tool-result",
+ },
+ ],
+ "role": "tool",
+ },
+]
+`;
+
+exports[`convertToCoreMessages > assistant message > should handle assistant message with tool invocations that have multi-part responses 1`] = `
+[
+ {
+ "content": [
+ {
+ "text": "Let me calculate that for you.",
+ "type": "text",
+ },
+ {
+ "args": {},
+ "toolCallId": "call1",
+ "toolName": "screenshot",
+ "type": "tool-call",
+ },
+ ],
+ "role": "assistant",
+ },
+ {
+ "content": [
+ {
+ "experimental_content": [
+ {
+ "data": "imgbase64",
+ "type": "image",
+ },
+ ],
+ "result": [
+ {
+ "data": "imgbase64",
+ "type": "image",
+ },
+ ],
+ "toolCallId": "call1",
+ "toolName": "screenshot",
+ "type": "tool-result",
+ },
+ ],
+ "role": "tool",
+ },
+]
+`;
+
+exports[`convertToCoreMessages > assistant message > should handle conversation with an assistant message that has empty tool invocations (parts) 1`] = `
+[
+ {
+ "content": "text1",
+ "role": "user",
+ },
+ {
+ "content": [
+ {
+ "text": "text2",
+ "type": "text",
+ },
+ ],
+ "role": "assistant",
+ },
+]
+`;
+
+exports[`convertToCoreMessages > assistant message > should handle conversation with an assistant message that has empty tool invocations 1`] = `
+[
+ {
+ "content": "text1",
+ "role": "user",
+ },
+ {
+ "content": "text2",
+ "role": "assistant",
+ },
+]
+`;
+
+exports[`convertToCoreMessages > assistant message > should handle conversation with mix of tool invocations and text (parts) 1`] = `
+[
+ {
+ "content": [
+ {
+ "text": "i am gonna use tool1",
+ "type": "text",
+ },
+ {
+ "args": {
+ "value": "value-1",
+ },
+ "toolCallId": "call-1",
+ "toolName": "screenshot",
+ "type": "tool-call",
+ },
+ ],
+ "role": "assistant",
+ },
+ {
+ "content": [
+ {
+ "result": "result-1",
+ "toolCallId": "call-1",
+ "toolName": "screenshot",
+ "type": "tool-result",
+ },
+ ],
+ "role": "tool",
+ },
+ {
+ "content": [
+ {
+ "text": "i am gonna use tool2 and tool3",
+ "type": "text",
+ },
+ {
+ "args": {
+ "value": "value-2",
+ },
+ "toolCallId": "call-2",
+ "toolName": "screenshot",
+ "type": "tool-call",
+ },
+ {
+ "args": {
+ "value": "value-3",
+ },
+ "toolCallId": "call-3",
+ "toolName": "screenshot",
+ "type": "tool-call",
+ },
+ ],
+ "role": "assistant",
+ },
+ {
+ "content": [
+ {
+ "result": "result-2",
+ "toolCallId": "call-2",
+ "toolName": "screenshot",
+ "type": "tool-result",
+ },
+ {
+ "result": "result-3",
+ "toolCallId": "call-3",
+ "toolName": "screenshot",
+ "type": "tool-result",
+ },
+ ],
+ "role": "tool",
+ },
+ {
+ "content": [
+ {
+ "args": {
+ "value": "value-4",
+ },
+ "toolCallId": "call-4",
+ "toolName": "screenshot",
+ "type": "tool-call",
+ },
+ ],
+ "role": "assistant",
+ },
+ {
+ "content": [
+ {
+ "result": "result-4",
+ "toolCallId": "call-4",
+ "toolName": "screenshot",
+ "type": "tool-result",
+ },
+ ],
+ "role": "tool",
+ },
+ {
+ "content": [
+ {
+ "text": "final response",
+ "type": "text",
+ },
+ ],
+ "role": "assistant",
+ },
+]
+`;
+
+exports[`convertToCoreMessages > assistant message > should handle conversation with multiple tool invocations that have step information (parts) 1`] = `
+[
+ {
+ "content": [
+ {
+ "text": "response",
+ "type": "text",
+ },
+ {
+ "args": {
+ "value": "value-1",
+ },
+ "toolCallId": "call-1",
+ "toolName": "screenshot",
+ "type": "tool-call",
+ },
+ ],
+ "role": "assistant",
+ },
+ {
+ "content": [
+ {
+ "result": "result-1",
+ "toolCallId": "call-1",
+ "toolName": "screenshot",
+ "type": "tool-result",
+ },
+ ],
+ "role": "tool",
+ },
+ {
+ "content": [
+ {
+ "args": {
+ "value": "value-2",
+ },
+ "toolCallId": "call-2",
+ "toolName": "screenshot",
+ "type": "tool-call",
+ },
+ {
+ "args": {
+ "value": "value-3",
+ },
+ "toolCallId": "call-3",
+ "toolName": "screenshot",
+ "type": "tool-call",
+ },
+ ],
+ "role": "assistant",
+ },
+ {
+ "content": [
+ {
+ "result": "result-2",
+ "toolCallId": "call-2",
+ "toolName": "screenshot",
+ "type": "tool-result",
+ },
+ {
+ "result": "result-3",
+ "toolCallId": "call-3",
+ "toolName": "screenshot",
+ "type": "tool-result",
+ },
+ ],
+ "role": "tool",
+ },
+ {
+ "content": [
+ {
+ "args": {
+ "value": "value-4",
+ },
+ "toolCallId": "call-4",
+ "toolName": "screenshot",
+ "type": "tool-call",
+ },
+ ],
+ "role": "assistant",
+ },
+ {
+ "content": [
+ {
+ "result": "result-4",
+ "toolCallId": "call-4",
+ "toolName": "screenshot",
+ "type": "tool-result",
+ },
+ ],
+ "role": "tool",
+ },
+]
+`;
+
+exports[`convertToCoreMessages > assistant message > should handle conversation with multiple tool invocations that have step information 1`] = `
+[
+ {
+ "content": [
+ {
+ "text": "response",
+ "type": "text",
+ },
+ {
+ "args": {
+ "value": "value-1",
+ },
+ "toolCallId": "call-1",
+ "toolName": "screenshot",
+ "type": "tool-call",
+ },
+ ],
+ "role": "assistant",
+ },
+ {
+ "content": [
+ {
+ "result": "result-1",
+ "toolCallId": "call-1",
+ "toolName": "screenshot",
+ "type": "tool-result",
+ },
+ ],
+ "role": "tool",
+ },
+ {
+ "content": [
+ {
+ "args": {
+ "value": "value-2",
+ },
+ "toolCallId": "call-2",
+ "toolName": "screenshot",
+ "type": "tool-call",
+ },
+ {
+ "args": {
+ "value": "value-3",
+ },
+ "toolCallId": "call-3",
+ "toolName": "screenshot",
+ "type": "tool-call",
+ },
+ ],
+ "role": "assistant",
+ },
+ {
+ "content": [
+ {
+ "result": "result-2",
+ "toolCallId": "call-2",
+ "toolName": "screenshot",
+ "type": "tool-result",
+ },
+ {
+ "result": "result-3",
+ "toolCallId": "call-3",
+ "toolName": "screenshot",
+ "type": "tool-result",
+ },
+ ],
+ "role": "tool",
+ },
+ {
+ "content": [
+ {
+ "args": {
+ "value": "value-4",
+ },
+ "toolCallId": "call-4",
+ "toolName": "screenshot",
+ "type": "tool-call",
+ },
+ ],
+ "role": "assistant",
+ },
+ {
+ "content": [
+ {
+ "result": "result-4",
+ "toolCallId": "call-4",
+ "toolName": "screenshot",
+ "type": "tool-result",
+ },
+ ],
+ "role": "tool",
+ },
+]
+`;
+
+exports[`convertToCoreMessages > multiple messages > should convert fully typed Message[] 1`] = `
+[
+ {
+ "content": "What is the weather in Tokyo?",
+ "role": "user",
+ },
+ {
+ "content": "It is sunny in Tokyo.",
+ "role": "assistant",
+ },
+]
+`;
+
+exports[`convertToCoreMessages > multiple messages > should handle conversation with multiple tool invocations and user message at the end (parts) 1`] = `
+[
+ {
+ "content": [
+ {
+ "args": {
+ "value": "value-1",
+ },
+ "toolCallId": "call-1",
+ "toolName": "screenshot",
+ "type": "tool-call",
+ },
+ ],
+ "role": "assistant",
+ },
+ {
+ "content": [
+ {
+ "result": "result-1",
+ "toolCallId": "call-1",
+ "toolName": "screenshot",
+ "type": "tool-result",
+ },
+ ],
+ "role": "tool",
+ },
+ {
+ "content": [
+ {
+ "args": {
+ "value": "value-2",
+ },
+ "toolCallId": "call-2",
+ "toolName": "screenshot",
+ "type": "tool-call",
+ },
+ {
+ "args": {
+ "value": "value-3",
+ },
+ "toolCallId": "call-3",
+ "toolName": "screenshot",
+ "type": "tool-call",
+ },
+ ],
+ "role": "assistant",
+ },
+ {
+ "content": [
+ {
+ "result": "result-2",
+ "toolCallId": "call-2",
+ "toolName": "screenshot",
+ "type": "tool-result",
+ },
+ {
+ "result": "result-3",
+ "toolCallId": "call-3",
+ "toolName": "screenshot",
+ "type": "tool-result",
+ },
+ ],
+ "role": "tool",
+ },
+ {
+ "content": [
+ {
+ "args": {
+ "value": "value-4",
+ },
+ "toolCallId": "call-4",
+ "toolName": "screenshot",
+ "type": "tool-call",
+ },
+ ],
+ "role": "assistant",
+ },
+ {
+ "content": [
+ {
+ "result": "result-4",
+ "toolCallId": "call-4",
+ "toolName": "screenshot",
+ "type": "tool-result",
+ },
+ ],
+ "role": "tool",
+ },
+ {
+ "content": [
+ {
+ "text": "response",
+ "type": "text",
+ },
+ ],
+ "role": "assistant",
+ },
+ {
+ "content": "Thanks!",
+ "role": "user",
+ },
+]
+`;
+
+exports[`convertToCoreMessages > multiple messages > should handle conversation with multiple tool invocations and user message at the end 1`] = `
+[
+ {
+ "content": [
+ {
+ "args": {
+ "value": "value-1",
+ },
+ "toolCallId": "call-1",
+ "toolName": "screenshot",
+ "type": "tool-call",
+ },
+ ],
+ "role": "assistant",
+ },
+ {
+ "content": [
+ {
+ "result": "result-1",
+ "toolCallId": "call-1",
+ "toolName": "screenshot",
+ "type": "tool-result",
+ },
+ ],
+ "role": "tool",
+ },
+ {
+ "content": [
+ {
+ "args": {
+ "value": "value-2",
+ },
+ "toolCallId": "call-2",
+ "toolName": "screenshot",
+ "type": "tool-call",
+ },
+ {
+ "args": {
+ "value": "value-3",
+ },
+ "toolCallId": "call-3",
+ "toolName": "screenshot",
+ "type": "tool-call",
+ },
+ ],
+ "role": "assistant",
+ },
+ {
+ "content": [
+ {
+ "result": "result-2",
+ "toolCallId": "call-2",
+ "toolName": "screenshot",
+ "type": "tool-result",
+ },
+ {
+ "result": "result-3",
+ "toolCallId": "call-3",
+ "toolName": "screenshot",
+ "type": "tool-result",
+ },
+ ],
+ "role": "tool",
+ },
+ {
+ "content": [
+ {
+ "args": {
+ "value": "value-4",
+ },
+ "toolCallId": "call-4",
+ "toolName": "screenshot",
+ "type": "tool-call",
+ },
+ ],
+ "role": "assistant",
+ },
+ {
+ "content": [
+ {
+ "result": "result-4",
+ "toolCallId": "call-4",
+ "toolName": "screenshot",
+ "type": "tool-result",
+ },
+ ],
+ "role": "tool",
+ },
+ {
+ "content": "response",
+ "role": "assistant",
+ },
+ {
+ "content": "Thanks!",
+ "role": "user",
+ },
+]
+`;
+
+exports[`convertToCoreMessages > user message > should handle user message with attachment URLs (file) 1`] = `
+[
+ {
+ "content": [
+ {
+ "text": "Check this document",
+ "type": "text",
+ },
+ {
+ "data": "dGVzdA==",
+ "mimeType": "application/pdf",
+ "type": "file",
+ },
+ ],
+ "role": "user",
+ },
+]
+`;
+
+exports[`convertToCoreMessages > user message > should handle user message with attachment URLs 1`] = `
+[
+ {
+ "content": [
+ {
+ "text": "Check this image",
+ "type": "text",
+ },
+ {
+ "image": Uint8Array [
+ 116,
+ 101,
+ 115,
+ 116,
+ ],
+ "type": "image",
+ },
+ ],
+ "role": "user",
+ },
+]
+`;
diff --git a/packages/ai/core/prompt/append-response-messages.test.ts b/packages/ai/core/prompt/append-response-messages.test.ts
index b1143fd60ee1..b2fc17b478c9 100644
--- a/packages/ai/core/prompt/append-response-messages.test.ts
+++ b/packages/ai/core/prompt/append-response-messages.test.ts
@@ -10,6 +10,7 @@ describe('appendResponseMessages', () => {
id: '1',
content: 'Hello!',
createdAt: new Date(123),
+ parts: [{ type: 'text', text: 'Hello!' }],
},
],
responseMessages: [
@@ -19,23 +20,12 @@ describe('appendResponseMessages', () => {
id: '123',
},
],
+ _internal: {
+ currentDate: () => new Date(789),
+ },
});
- expect(result).toStrictEqual([
- {
- role: 'user',
- id: '1',
- content: 'Hello!',
- createdAt: new Date(123),
- },
- {
- role: 'assistant',
- content: 'This is a response from the assistant.',
- id: '123',
- createdAt: expect.any(Date),
- toolInvocations: [],
- },
- ]);
+ expect(result).toMatchSnapshot();
});
it('adds assistant text response to previous assistant message', () => {
@@ -46,6 +36,7 @@ describe('appendResponseMessages', () => {
id: '1',
content: 'User wants a tool invocation',
createdAt: new Date(123),
+ parts: [{ type: 'text', text: 'User wants a tool invocation' }],
},
{
role: 'assistant',
@@ -62,6 +53,19 @@ describe('appendResponseMessages', () => {
step: 0,
},
],
+ parts: [
+ {
+ type: 'tool-invocation',
+ toolInvocation: {
+ toolCallId: 'call-1',
+ toolName: 'some-tool',
+ state: 'result',
+ args: { query: 'some query' },
+ result: { answer: 'Tool result data' },
+ step: 0,
+ },
+ },
+ ],
},
],
responseMessages: [
@@ -71,32 +75,12 @@ describe('appendResponseMessages', () => {
id: '123',
},
],
+ _internal: {
+ currentDate: () => new Date(789),
+ },
});
- expect(result).toStrictEqual([
- {
- role: 'user',
- id: '1',
- content: 'User wants a tool invocation',
- createdAt: new Date(123),
- },
- {
- role: 'assistant',
- content: 'This is a response from the assistant.',
- id: '2',
- createdAt: new Date(456),
- toolInvocations: [
- {
- state: 'result',
- toolCallId: 'call-1',
- toolName: 'some-tool',
- args: { query: 'some query' },
- result: { answer: 'Tool result data' },
- step: 0,
- },
- ],
- },
- ]);
+ expect(result).toMatchSnapshot();
});
it('adds assistant tool call response to previous assistant message', () => {
@@ -107,6 +91,7 @@ describe('appendResponseMessages', () => {
id: '1',
content: 'User wants a tool invocation',
createdAt: new Date(123),
+ parts: [{ type: 'text', text: 'User wants a tool invocation' }],
},
{
role: 'assistant',
@@ -123,6 +108,19 @@ describe('appendResponseMessages', () => {
step: 0,
},
],
+ parts: [
+ {
+ type: 'tool-invocation',
+ toolInvocation: {
+ toolCallId: 'call-1',
+ toolName: 'some-tool',
+ state: 'result',
+ args: { query: 'some query' },
+ result: { answer: 'Tool result data' },
+ step: 0,
+ },
+ },
+ ],
},
],
responseMessages: [
@@ -139,39 +137,12 @@ describe('appendResponseMessages', () => {
id: '123',
},
],
+ _internal: {
+ currentDate: () => new Date(789),
+ },
});
- expect(result).toStrictEqual([
- {
- role: 'user',
- id: '1',
- content: 'User wants a tool invocation',
- createdAt: new Date(123),
- },
- {
- role: 'assistant',
- content: '',
- id: '2',
- createdAt: new Date(456),
- toolInvocations: [
- {
- state: 'result',
- toolCallId: 'call-1',
- toolName: 'some-tool',
- args: { query: 'some query' },
- result: { answer: 'Tool result data' },
- step: 0,
- },
- {
- state: 'call',
- toolCallId: 'call-2',
- toolName: 'some-tool',
- args: { query: 'another query' },
- step: 1,
- },
- ],
- },
- ]);
+ expect(result).toMatchSnapshot();
});
it('handles tool calls and marks them as "call" initially', () => {
@@ -182,6 +153,7 @@ describe('appendResponseMessages', () => {
id: '1',
content: 'User wants a tool invocation',
createdAt: new Date(123),
+ parts: [{ type: 'text', text: 'User wants a tool invocation' }],
},
],
responseMessages: [
@@ -199,31 +171,12 @@ describe('appendResponseMessages', () => {
id: '123',
},
],
+ _internal: {
+ currentDate: () => new Date(789),
+ },
});
- expect(result).toStrictEqual([
- {
- role: 'user',
- id: '1',
- content: 'User wants a tool invocation',
- createdAt: new Date(123),
- },
- {
- role: 'assistant',
- content: 'Processing tool call...',
- id: '123',
- createdAt: expect.any(Date),
- toolInvocations: [
- {
- state: 'call',
- toolCallId: 'call-1',
- toolName: 'some-tool',
- args: { query: 'some query' },
- step: 0,
- },
- ],
- },
- ]);
+ expect(result).toMatchSnapshot();
});
it('adds tool results to the previously invoked tool calls (assistant message)', () => {
@@ -234,6 +187,7 @@ describe('appendResponseMessages', () => {
id: '1',
content: 'User wants a tool invocation',
createdAt: new Date(123),
+ parts: [{ type: 'text', text: 'User wants a tool invocation' }],
},
{
role: 'assistant',
@@ -249,6 +203,18 @@ describe('appendResponseMessages', () => {
step: 0,
},
],
+ parts: [
+ {
+ type: 'tool-invocation',
+ toolInvocation: {
+ toolCallId: 'call-1',
+ toolName: 'some-tool',
+ state: 'call',
+ args: { query: 'some query' },
+ step: 0,
+ },
+ },
+ ],
},
],
responseMessages: [
@@ -265,32 +231,12 @@ describe('appendResponseMessages', () => {
],
},
],
+ _internal: {
+ currentDate: () => new Date(789),
+ },
});
- expect(result).toStrictEqual([
- {
- role: 'user',
- id: '1',
- content: 'User wants a tool invocation',
- createdAt: new Date(123),
- },
- {
- role: 'assistant',
- content: 'Placeholder text',
- id: '2',
- createdAt: new Date(456),
- toolInvocations: [
- {
- state: 'result',
- toolCallId: 'call-1',
- toolName: 'some-tool',
- args: { query: 'some query' },
- result: { answer: 'Tool result data' },
- step: 0,
- },
- ],
- },
- ]);
+ expect(result).toMatchSnapshot();
});
it('adds chain of assistant messages and tool results', () => {
@@ -301,6 +247,7 @@ describe('appendResponseMessages', () => {
id: '1',
content: 'User wants a tool invocation',
createdAt: new Date(123),
+ parts: [{ type: 'text', text: 'User wants a tool invocation' }],
},
],
responseMessages: [
@@ -358,40 +305,12 @@ describe('appendResponseMessages', () => {
id: '6',
},
],
+ _internal: {
+ currentDate: () => new Date(789),
+ },
});
- expect(result).toStrictEqual([
- {
- role: 'user',
- id: '1',
- content: 'User wants a tool invocation',
- createdAt: new Date(123),
- },
- {
- role: 'assistant',
- content: 'response',
- id: '2',
- createdAt: expect.any(Date),
- toolInvocations: [
- {
- state: 'result',
- toolCallId: 'call-1',
- toolName: 'some-tool',
- args: { query: 'some query' },
- result: { answer: 'Tool result data' },
- step: 0,
- },
- {
- state: 'result',
- toolCallId: 'call-2',
- toolName: 'some-tool',
- args: { query: 'another query' },
- result: { answer: 'another result' },
- step: 1,
- },
- ],
- },
- ]);
+ expect(result).toMatchSnapshot();
});
it('throws an error if a tool result follows a non-assistant message', () => {
@@ -403,6 +322,7 @@ describe('appendResponseMessages', () => {
id: '1',
content: 'User message',
createdAt: new Date(),
+ parts: [{ type: 'text', text: 'User message' }],
},
],
responseMessages: [
diff --git a/packages/ai/core/prompt/append-response-messages.ts b/packages/ai/core/prompt/append-response-messages.ts
index 9f4bda63a9a4..ff598314b64d 100644
--- a/packages/ai/core/prompt/append-response-messages.ts
+++ b/packages/ai/core/prompt/append-response-messages.ts
@@ -2,6 +2,7 @@ import {
extractMaxToolInvocationStep,
Message,
ToolInvocation,
+ ToolInvocationUIPart,
} from '@ai-sdk/ui-utils';
import { ResponseMessage } from '../generate-text/step-result';
@@ -15,9 +16,17 @@ import { ResponseMessage } from '../generate-text/step-result';
export function appendResponseMessages({
messages,
responseMessages,
+ _internal: { currentDate = () => new Date() } = {},
}: {
messages: Message[];
responseMessages: ResponseMessage[];
+
+ /**
+Internal. For test use only. May change without notice.
+ */
+ _internal?: {
+ currentDate?: () => Date;
+ };
}): Message[] {
const clonedMessages = structuredClone(messages);
@@ -29,8 +38,7 @@ export function appendResponseMessages({
const isLastMessageAssistant = lastMessage.role === 'assistant';
switch (role) {
- case 'assistant': {
- // only include text in the content:
+ case 'assistant': // only include text in the content:
const textContent =
typeof message.content === 'string'
? message.content
@@ -58,24 +66,50 @@ export function appendResponseMessages({
lastMessage.toolInvocations,
);
+ lastMessage.parts ??= [];
+
lastMessage.content = textContent;
+ if (textContent.length > 0) {
+ lastMessage.parts.push({
+ type: 'text' as const,
+ text: textContent,
+ });
+ }
+
lastMessage.toolInvocations = [
...(lastMessage.toolInvocations ?? []),
...getToolInvocations(maxStep === undefined ? 0 : maxStep + 1),
];
+
+ getToolInvocations(maxStep === undefined ? 0 : maxStep + 1)
+ .map(call => ({
+ type: 'tool-invocation' as const,
+ toolInvocation: call,
+ }))
+ .forEach(part => {
+ lastMessage.parts!.push(part);
+ });
} else {
// last message was a user message, add the assistant message:
clonedMessages.push({
role: 'assistant',
id: message.id,
- createdAt: new Date(), // generate a createdAt date for the message, will be overridden by the client
+ createdAt: currentDate(), // generate a createdAt date for the message, will be overridden by the client
content: textContent,
toolInvocations: getToolInvocations(0),
+ parts: [
+ ...(textContent.length > 0
+ ? [{ type: 'text' as const, text: textContent }]
+ : []),
+ ...getToolInvocations(0).map(call => ({
+ type: 'tool-invocation' as const,
+ toolInvocation: call,
+ })),
+ ],
});
}
break;
- }
case 'tool': {
// for tool call results, add the result to previous message:
@@ -87,11 +121,19 @@ export function appendResponseMessages({
);
}
- for (const part of message.content) {
+ lastMessage.parts ??= [];
+
+ for (const contentPart of message.content) {
// find the tool call in the previous message:
const toolCall = lastMessage.toolInvocations.find(
- call => call.toolCallId === part.toolCallId,
+ call => call.toolCallId === contentPart.toolCallId,
);
+ const toolCallPart: ToolInvocationUIPart | undefined =
+ lastMessage.parts.find(
+ (part): part is ToolInvocationUIPart =>
+ part.type === 'tool-invocation' &&
+ part.toolInvocation.toolCallId === contentPart.toolCallId,
+ );
if (!toolCall) {
throw new Error('Tool call not found in previous message');
@@ -100,7 +142,16 @@ export function appendResponseMessages({
// add the result to the tool call:
toolCall.state = 'result';
const toolResult = toolCall as ToolInvocation & { state: 'result' };
- toolResult.result = part.result;
+ toolResult.result = contentPart.result;
+
+ if (toolCallPart) {
+ toolCallPart.toolInvocation = toolResult;
+ } else {
+ lastMessage.parts.push({
+ type: 'tool-invocation' as const,
+ toolInvocation: toolResult,
+ });
+ }
}
break;
diff --git a/packages/ai/core/prompt/convert-to-core-messages.test.ts b/packages/ai/core/prompt/convert-to-core-messages.test.ts
index 7881db3e5216..b31ea116c37b 100644
--- a/packages/ai/core/prompt/convert-to-core-messages.test.ts
+++ b/packages/ai/core/prompt/convert-to-core-messages.test.ts
@@ -3,272 +3,212 @@ import { convertToCoreMessages } from './convert-to-core-messages';
import { tool } from '../tool/tool';
import { z } from 'zod';
-describe('system message', () => {
- it('should convert a simple system message', () => {
- const result = convertToCoreMessages([
- { role: 'system', content: 'System message' },
- ]);
-
- expect(result).toEqual([{ role: 'system', content: 'System message' }]);
- });
-});
-
-describe('user message', () => {
- it('should convert a simple user message', () => {
- const result = convertToCoreMessages([
- { role: 'user', content: 'Hello, AI!' },
- ]);
-
- expect(result).toEqual([{ role: 'user', content: 'Hello, AI!' }]);
- });
-
- it('should handle user message with attachments', () => {
- const attachment: Attachment = {
- contentType: 'image/jpeg',
- url: 'https://example.com/image.jpg',
- };
-
- const result = convertToCoreMessages([
- {
- role: 'user',
- content: 'Check this image',
- experimental_attachments: [attachment],
- },
- ]);
-
- expect(result).toEqual([
- {
- role: 'user',
- content: [
- { type: 'text', text: 'Check this image' },
- { type: 'image', image: new URL('https://example.com/image.jpg') },
- ],
- },
- ]);
- });
+describe('convertToCoreMessages', () => {
+ describe('system message', () => {
+ it('should convert a simple system message', () => {
+ const result = convertToCoreMessages([
+ { role: 'system', content: 'System message' },
+ ]);
- it('should handle user message with attachments (file)', () => {
- const attachment: Attachment = {
- contentType: 'application/pdf',
- url: 'https://example.com/document.pdf',
- };
-
- const result = convertToCoreMessages([
- {
- role: 'user',
- content: 'Check this document',
- experimental_attachments: [attachment],
- },
- ]);
-
- expect(result).toEqual([
- {
- role: 'user',
- content: [
- { type: 'text', text: 'Check this document' },
- {
- type: 'file',
- data: new URL('https://example.com/document.pdf'),
- mimeType: 'application/pdf',
- },
- ],
- },
- ]);
+ expect(result).toEqual([{ role: 'system', content: 'System message' }]);
+ });
});
- it('should handle user message with attachment URLs', () => {
- const attachment: Attachment = {
- contentType: 'image/jpeg',
- url: 'data:image/jpg;base64,dGVzdA==',
- };
-
- const result = convertToCoreMessages([
- {
- role: 'user',
- content: 'Check this image',
- experimental_attachments: [attachment],
- },
- ]);
-
- expect(result).toEqual([
- {
- role: 'user',
- content: [
- { type: 'text', text: 'Check this image' },
- { type: 'image', image: new Uint8Array([116, 101, 115, 116]) },
- ],
- },
- ]);
- });
+ describe('user message', () => {
+ it('should convert a simple user message', () => {
+ const result = convertToCoreMessages([
+ { role: 'user', content: 'Hello, AI!' },
+ ]);
- it('should handle user message with attachment URLs (file)', () => {
- const attachment: Attachment = {
- contentType: 'application/pdf',
- url: 'data:application/pdf;base64,dGVzdA==',
- };
-
- const result = convertToCoreMessages([
- {
- role: 'user',
- content: 'Check this document',
- experimental_attachments: [attachment],
- },
- ]);
-
- expect(result).toEqual([
- {
- role: 'user',
- content: [
- { type: 'text', text: 'Check this document' },
- {
- type: 'file',
- data: 'dGVzdA==',
- mimeType: 'application/pdf',
- },
- ],
- },
- ]);
- });
+ expect(result).toEqual([{ role: 'user', content: 'Hello, AI!' }]);
+ });
- it('should throw an error for invalid attachment URLs', () => {
- const attachment: Attachment = {
- contentType: 'image/jpeg',
- url: 'invalid-url',
- };
+ it('should handle user message with attachments', () => {
+ const attachment: Attachment = {
+ contentType: 'image/jpeg',
+ url: 'https://example.com/image.jpg',
+ };
- expect(() => {
- convertToCoreMessages([
+ const result = convertToCoreMessages([
{
role: 'user',
content: 'Check this image',
experimental_attachments: [attachment],
},
]);
- }).toThrow('Invalid URL: invalid-url');
- });
- it('should throw an error for file attachments without contentType', () => {
- const attachment: Attachment = {
- url: 'data:application/pdf;base64,dGVzdA==',
- };
+ expect(result).toEqual([
+ {
+ role: 'user',
+ content: [
+ { type: 'text', text: 'Check this image' },
+ { type: 'image', image: new URL('https://example.com/image.jpg') },
+ ],
+ },
+ ]);
+ });
+
+ it('should handle user message with attachments (file)', () => {
+ const attachment: Attachment = {
+ contentType: 'application/pdf',
+ url: 'https://example.com/document.pdf',
+ };
- expect(() => {
- convertToCoreMessages([
+ const result = convertToCoreMessages([
{
role: 'user',
- content: 'Check this file',
+ content: 'Check this document',
experimental_attachments: [attachment],
},
]);
- }).toThrow(
- 'If the attachment is not an image or text, it must specify a content type',
- );
- });
- it('should throw an error for invalid data URL format', () => {
- const attachment: Attachment = {
- contentType: 'image/jpeg',
- url: 'data:image/jpg;base64',
- };
+ expect(result).toEqual([
+ {
+ role: 'user',
+ content: [
+ { type: 'text', text: 'Check this document' },
+ {
+ type: 'file',
+ data: new URL('https://example.com/document.pdf'),
+ mimeType: 'application/pdf',
+ },
+ ],
+ },
+ ]);
+ });
+
+ it('should handle user message with attachment URLs', () => {
+ const attachment: Attachment = {
+ contentType: 'image/jpeg',
+ url: 'data:image/jpg;base64,dGVzdA==',
+ };
- expect(() => {
- convertToCoreMessages([
+ const result = convertToCoreMessages([
{
role: 'user',
content: 'Check this image',
experimental_attachments: [attachment],
},
]);
- }).toThrow(`Invalid data URL format: ${attachment.url}`);
- });
- it('should throw an error for unsupported attachment protocols', () => {
- const attachment: Attachment = {
- contentType: 'image/jpeg',
- url: 'ftp://example.com/image.jpg',
- };
+ expect(result).toMatchSnapshot();
+ });
+
+ it('should handle user message with attachment URLs (file)', () => {
+ const attachment: Attachment = {
+ contentType: 'application/pdf',
+ url: 'data:application/pdf;base64,dGVzdA==',
+ };
- expect(() => {
- convertToCoreMessages([
+ const result = convertToCoreMessages([
{
role: 'user',
- content: 'Check this image',
+ content: 'Check this document',
experimental_attachments: [attachment],
},
]);
- }).toThrow('Unsupported URL protocol: ftp:');
- });
-});
-describe('assistant message', () => {
- it('should convert a simple assistant message', () => {
- const result = convertToCoreMessages([
- { role: 'assistant', content: 'Hello, human!' },
- ]);
+ expect(result).toMatchSnapshot();
+ });
- expect(result).toEqual([{ role: 'assistant', content: 'Hello, human!' }]);
- });
+ it('should throw an error for invalid attachment URLs', () => {
+ const attachment: Attachment = {
+ contentType: 'image/jpeg',
+ url: 'invalid-url',
+ };
- it('should handle assistant message with tool invocations', () => {
- const result = convertToCoreMessages([
- {
- role: 'assistant',
- content: 'Let me calculate that for you.',
- toolInvocations: [
+ expect(() => {
+ convertToCoreMessages([
{
- state: 'result',
- toolCallId: 'call1',
- toolName: 'calculator',
- args: { operation: 'add', numbers: [1, 2] },
- result: '3',
+ role: 'user',
+ content: 'Check this image',
+ experimental_attachments: [attachment],
},
- ],
- },
- ]);
+ ]);
+ }).toThrow('Invalid URL: invalid-url');
+ });
+
+ it('should throw an error for file attachments without contentType', () => {
+ const attachment: Attachment = {
+ url: 'data:application/pdf;base64,dGVzdA==',
+ };
- expect(result).toEqual([
- {
- role: 'assistant',
- content: [
+ expect(() => {
+ convertToCoreMessages([
{
- type: 'text',
- text: 'Let me calculate that for you.',
+ role: 'user',
+ content: 'Check this file',
+ experimental_attachments: [attachment],
},
+ ]);
+ }).toThrow(
+ 'If the attachment is not an image or text, it must specify a content type',
+ );
+ });
+
+ it('should throw an error for invalid data URL format', () => {
+ const attachment: Attachment = {
+ contentType: 'image/jpeg',
+ url: 'data:image/jpg;base64',
+ };
+
+ expect(() => {
+ convertToCoreMessages([
{
- type: 'tool-call',
- toolCallId: 'call1',
- toolName: 'calculator',
- args: { operation: 'add', numbers: [1, 2] },
+ role: 'user',
+ content: 'Check this image',
+ experimental_attachments: [attachment],
},
- ],
- },
- {
- role: 'tool',
- content: [
+ ]);
+ }).toThrow(`Invalid data URL format: ${attachment.url}`);
+ });
+
+ it('should throw an error for unsupported attachment protocols', () => {
+ const attachment: Attachment = {
+ contentType: 'image/jpeg',
+ url: 'ftp://example.com/image.jpg',
+ };
+
+ expect(() => {
+ convertToCoreMessages([
{
- type: 'tool-result',
- toolCallId: 'call1',
- toolName: 'calculator',
- result: '3',
+ role: 'user',
+ content: 'Check this image',
+ experimental_attachments: [attachment],
},
- ],
- },
- ]);
+ ]);
+ }).toThrow('Unsupported URL protocol: ftp:');
+ });
});
- it('should handle assistant message with tool invocations that have multi-part responses', () => {
- const tools = {
- screenshot: tool({
- parameters: z.object({}),
- execute: async () => 'imgbase64',
- experimental_toToolResultContent: result => [
- { type: 'image', data: result },
- ],
- }),
- };
+ describe('assistant message', () => {
+ it('should convert a simple assistant message', () => {
+ const result = convertToCoreMessages([
+ { role: 'assistant', content: 'Hello, human!' },
+ ]);
+
+ expect(result).toEqual([{ role: 'assistant', content: 'Hello, human!' }]);
+ });
+
+ it('should convert a simple assistant message (parts)', () => {
+ const result = convertToCoreMessages([
+ {
+ role: 'assistant',
+ content: '', // empty content
+ parts: [{ type: 'text', text: 'Hello, human!' }],
+ },
+ ]);
+
+ expect(result).toEqual([
+ {
+ role: 'assistant',
+ content: [{ type: 'text', text: 'Hello, human!' }],
+ },
+ ]);
+ });
- const result = convertToCoreMessages(
- [
+ it('should handle assistant message with tool invocations', () => {
+ const result = convertToCoreMessages([
{
role: 'assistant',
content: 'Let me calculate that for you.',
@@ -276,410 +216,554 @@ describe('assistant message', () => {
{
state: 'result',
toolCallId: 'call1',
- toolName: 'screenshot',
- args: {},
- result: 'imgbase64',
+ toolName: 'calculator',
+ args: { operation: 'add', numbers: [1, 2] },
+ result: '3',
},
],
},
- ],
- { tools }, // separate tools to ensure that types are inferred correctly
- );
-
- expect(result).toEqual([
- {
- role: 'assistant',
- content: [
- {
- type: 'text',
- text: 'Let me calculate that for you.',
- },
- {
- type: 'tool-call',
- toolCallId: 'call1',
- toolName: 'screenshot',
- args: {},
- },
- ],
- },
- {
- role: 'tool',
- content: [
- {
- type: 'tool-result',
- toolCallId: 'call1',
- toolName: 'screenshot',
- result: [{ type: 'image', data: 'imgbase64' }],
- experimental_content: [{ type: 'image', data: 'imgbase64' }],
- },
- ],
- },
- ]);
- });
-
- it('should handle conversation with an assistant message that has empty tool invocations', () => {
- const result = convertToCoreMessages([
- {
- role: 'user',
- content: 'text1',
- toolInvocations: [],
- },
- {
- role: 'assistant',
- content: 'text2',
- toolInvocations: [],
- },
- ]);
-
- expect(result).toEqual([
- {
- role: 'user',
- content: 'text1',
- },
- {
- role: 'assistant',
- content: 'text2',
- },
- ]);
- });
+ ]);
- it('should handle conversation with multiple tool invocations that have step information', () => {
- const tools = {
- screenshot: tool({
- parameters: z.object({ value: z.string() }),
- execute: async () => 'imgbase64',
- }),
- };
+ expect(result).toMatchSnapshot();
+ });
- const result = convertToCoreMessages(
- [
+ it('should handle assistant message with tool invocations (parts)', () => {
+ const result = convertToCoreMessages([
{
role: 'assistant',
- content: 'response',
- toolInvocations: [
- {
- state: 'result',
- toolCallId: 'call-1',
- toolName: 'screenshot',
- args: { value: 'value-1' },
- result: 'result-1',
- step: 0,
- },
- {
- state: 'result',
- toolCallId: 'call-2',
- toolName: 'screenshot',
- args: { value: 'value-2' },
- result: 'result-2',
- step: 1,
- },
-
- {
- state: 'result',
- toolCallId: 'call-3',
- toolName: 'screenshot',
- args: { value: 'value-3' },
- result: 'result-3',
- step: 1,
- },
+ content: '', // empty content
+ toolInvocations: [], // empty invocations
+ parts: [
+ { type: 'text', text: 'Let me calculate that for you.' },
{
- state: 'result',
- toolCallId: 'call-4',
- toolName: 'screenshot',
- args: { value: 'value-4' },
- result: 'result-4',
- step: 2,
+ type: 'tool-invocation',
+ toolInvocation: {
+ state: 'result',
+ toolCallId: 'call1',
+ toolName: 'calculator',
+ args: { operation: 'add', numbers: [1, 2] },
+ result: '3',
+ step: 0,
+ },
},
],
},
- ],
- { tools }, // separate tools to ensure that types are inferred correctly
- );
-
- expect(result).toEqual([
- {
- role: 'assistant',
- content: [
- {
- type: 'text',
- text: 'response',
- },
- {
- type: 'tool-call',
- toolCallId: 'call-1',
- toolName: 'screenshot',
- args: { value: 'value-1' },
- },
- ],
- },
- {
- role: 'tool',
- content: [
+ ]);
+
+ expect(result).toMatchSnapshot();
+ });
+
+ it('should handle assistant message with tool invocations that have multi-part responses', () => {
+ const tools = {
+ screenshot: tool({
+ parameters: z.object({}),
+ execute: async () => 'imgbase64',
+ experimental_toToolResultContent: result => [
+ { type: 'image', data: result },
+ ],
+ }),
+ };
+
+ const result = convertToCoreMessages(
+ [
{
- type: 'tool-result',
- toolCallId: 'call-1',
- toolName: 'screenshot',
- result: 'result-1',
+ role: 'assistant',
+ content: 'Let me calculate that for you.',
+ toolInvocations: [
+ {
+ state: 'result',
+ toolCallId: 'call1',
+ toolName: 'screenshot',
+ args: {},
+ result: 'imgbase64',
+ },
+ ],
},
],
- },
- {
- role: 'assistant',
- content: [
- {
- type: 'tool-call',
- toolCallId: 'call-2',
- toolName: 'screenshot',
- args: { value: 'value-2' },
- },
+ { tools }, // separate tools to ensure that types are inferred correctly
+ );
+
+ expect(result).toMatchSnapshot();
+ });
+
+ it('should handle assistant message with tool invocations that have multi-part responses (parts)', () => {
+ const tools = {
+ screenshot: tool({
+ parameters: z.object({}),
+ execute: async () => 'imgbase64',
+ experimental_toToolResultContent: result => [
+ { type: 'image', data: result },
+ ],
+ }),
+ };
+
+ const result = convertToCoreMessages(
+ [
{
- type: 'tool-call',
- toolCallId: 'call-3',
- toolName: 'screenshot',
- args: { value: 'value-3' },
+ role: 'assistant',
+ content: '', // empty content
+ toolInvocations: [], // empty invocations
+ parts: [
+ { type: 'text', text: 'Let me calculate that for you.' },
+ {
+ type: 'tool-invocation',
+ toolInvocation: {
+ state: 'result',
+ toolCallId: 'call1',
+ toolName: 'screenshot',
+ args: {},
+ result: 'imgbase64',
+ step: 0,
+ },
+ },
+ ],
},
],
- },
- {
- role: 'tool',
- content: [
- {
- type: 'tool-result',
- toolCallId: 'call-2',
- toolName: 'screenshot',
- result: 'result-2',
- },
+ { tools }, // separate tools to ensure that types are inferred correctly
+ );
+
+ expect(result).toMatchSnapshot();
+ });
+
+ it('should handle conversation with an assistant message that has empty tool invocations', () => {
+ const result = convertToCoreMessages([
+ {
+ role: 'user',
+ content: 'text1',
+ toolInvocations: [],
+ },
+ {
+ role: 'assistant',
+ content: 'text2',
+ toolInvocations: [],
+ },
+ ]);
+
+ expect(result).toMatchSnapshot();
+ });
+
+ it('should handle conversation with an assistant message that has empty tool invocations (parts)', () => {
+ const result = convertToCoreMessages([
+ {
+ role: 'user',
+ content: 'text1',
+ toolInvocations: [],
+ parts: [{ type: 'text', text: 'text1' }],
+ },
+ {
+ role: 'assistant',
+ content: '', // empty content
+ toolInvocations: [],
+ parts: [{ type: 'text', text: 'text2' }],
+ },
+ ]);
+
+ expect(result).toMatchSnapshot();
+ });
+
+ it('should handle conversation with multiple tool invocations that have step information', () => {
+ const tools = {
+ screenshot: tool({
+ parameters: z.object({ value: z.string() }),
+ execute: async () => 'imgbase64',
+ }),
+ };
+
+ const result = convertToCoreMessages(
+ [
{
- type: 'tool-result',
- toolCallId: 'call-3',
- toolName: 'screenshot',
- result: 'result-3',
+ role: 'assistant',
+ content: 'response',
+ toolInvocations: [
+ {
+ state: 'result',
+ toolCallId: 'call-1',
+ toolName: 'screenshot',
+ args: { value: 'value-1' },
+ result: 'result-1',
+ step: 0,
+ },
+ {
+ state: 'result',
+ toolCallId: 'call-2',
+ toolName: 'screenshot',
+ args: { value: 'value-2' },
+ result: 'result-2',
+ step: 1,
+ },
+
+ {
+ state: 'result',
+ toolCallId: 'call-3',
+ toolName: 'screenshot',
+ args: { value: 'value-3' },
+ result: 'result-3',
+ step: 1,
+ },
+ {
+ state: 'result',
+ toolCallId: 'call-4',
+ toolName: 'screenshot',
+ args: { value: 'value-4' },
+ result: 'result-4',
+ step: 2,
+ },
+ ],
},
],
- },
- {
- role: 'assistant',
- content: [
+ { tools }, // separate tools to ensure that types are inferred correctly
+ );
+
+ expect(result).toMatchSnapshot();
+ });
+
+ it('should handle conversation with multiple tool invocations that have step information (parts)', () => {
+ const tools = {
+ screenshot: tool({
+ parameters: z.object({ value: z.string() }),
+ execute: async () => 'imgbase64',
+ }),
+ };
+
+ const result = convertToCoreMessages(
+ [
{
- type: 'tool-call',
- toolCallId: 'call-4',
- toolName: 'screenshot',
- args: { value: 'value-4' },
+ role: 'assistant',
+ content: '', // empty content
+ toolInvocations: [], // empty invocations
+ parts: [
+ { type: 'text', text: 'response' },
+ {
+ type: 'tool-invocation',
+ toolInvocation: {
+ state: 'result',
+ toolCallId: 'call-1',
+ toolName: 'screenshot',
+ args: { value: 'value-1' },
+ result: 'result-1',
+ step: 0,
+ },
+ },
+ {
+ type: 'tool-invocation',
+ toolInvocation: {
+ state: 'result',
+ toolCallId: 'call-2',
+ toolName: 'screenshot',
+ args: { value: 'value-2' },
+ result: 'result-2',
+ step: 1,
+ },
+ },
+ {
+ type: 'tool-invocation',
+ toolInvocation: {
+ state: 'result',
+ toolCallId: 'call-3',
+ toolName: 'screenshot',
+ args: { value: 'value-3' },
+ result: 'result-3',
+ step: 1,
+ },
+ },
+ {
+ type: 'tool-invocation',
+ toolInvocation: {
+ state: 'result',
+ toolCallId: 'call-4',
+ toolName: 'screenshot',
+ args: { value: 'value-4' },
+ result: 'result-4',
+ step: 2,
+ },
+ },
+ ],
},
],
- },
- {
- role: 'tool',
- content: [
+ { tools }, // separate tools to ensure that types are inferred correctly
+ );
+
+ expect(result).toMatchSnapshot();
+ });
+
+ it('should handle conversation with mix of tool invocations and text (parts)', () => {
+ const tools = {
+ screenshot: tool({
+ parameters: z.object({ value: z.string() }),
+ execute: async () => 'imgbase64',
+ }),
+ };
+
+ const result = convertToCoreMessages(
+ [
{
- type: 'tool-result',
- toolCallId: 'call-4',
- toolName: 'screenshot',
- result: 'result-4',
+ role: 'assistant',
+ content: '', // empty content
+ toolInvocations: [], // empty invocations
+ parts: [
+ { type: 'text', text: 'i am gonna use tool1' },
+ {
+ type: 'tool-invocation',
+ toolInvocation: {
+ state: 'result',
+ toolCallId: 'call-1',
+ toolName: 'screenshot',
+ args: { value: 'value-1' },
+ result: 'result-1',
+ step: 0,
+ },
+ },
+ { type: 'text', text: 'i am gonna use tool2 and tool3' },
+ {
+ type: 'tool-invocation',
+ toolInvocation: {
+ state: 'result',
+ toolCallId: 'call-2',
+ toolName: 'screenshot',
+ args: { value: 'value-2' },
+ result: 'result-2',
+ step: 1,
+ },
+ },
+ {
+ type: 'tool-invocation',
+ toolInvocation: {
+ state: 'result',
+ toolCallId: 'call-3',
+ toolName: 'screenshot',
+ args: { value: 'value-3' },
+ result: 'result-3',
+ step: 1,
+ },
+ },
+ {
+ type: 'tool-invocation',
+ toolInvocation: {
+ state: 'result',
+ toolCallId: 'call-4',
+ toolName: 'screenshot',
+ args: { value: 'value-4' },
+ result: 'result-4',
+ step: 2,
+ },
+ },
+ { type: 'text', text: 'final response' },
+ ],
},
],
- },
- ]);
- });
-});
+ { tools }, // separate tools to ensure that types are inferred correctly
+ );
-describe('multiple messages', () => {
- it('should handle a conversation with multiple messages', () => {
- const result = convertToCoreMessages([
- { role: 'user', content: "What's the weather like?" },
- { role: 'assistant', content: "I'll check that for you." },
- { role: 'user', content: 'Thanks!' },
- ]);
-
- expect(result).toEqual([
- { role: 'user', content: "What's the weather like?" },
- { role: 'assistant', content: "I'll check that for you." },
- { role: 'user', content: 'Thanks!' },
- ]);
+ expect(result).toMatchSnapshot();
+ });
});
- it('should convert fully typed Message[]', () => {
- const messages: Message[] = [
- {
- id: '1',
- role: 'user',
- content: 'What is the weather in Tokyo?',
- },
- {
- id: '2',
- role: 'assistant',
- content: 'It is sunny in Tokyo.',
- },
- ];
-
- const result = convertToCoreMessages(messages);
-
- expect(result).toStrictEqual([
- {
- role: 'user',
- content: 'What is the weather in Tokyo?',
- },
- {
- role: 'assistant',
- content: 'It is sunny in Tokyo.',
- },
- ]);
- });
+ describe('multiple messages', () => {
+ it('should handle a conversation with multiple messages', () => {
+ const result = convertToCoreMessages([
+ { role: 'user', content: "What's the weather like?" },
+ { role: 'assistant', content: "I'll check that for you." },
+ { role: 'user', content: 'Thanks!' },
+ ]);
- it('should handle conversation with multiple tool invocations and user message at the end', () => {
- const tools = {
- screenshot: tool({
- parameters: z.object({ value: z.string() }),
- execute: async () => 'imgbase64',
- }),
- };
+ expect(result).toEqual([
+ { role: 'user', content: "What's the weather like?" },
+ { role: 'assistant', content: "I'll check that for you." },
+ { role: 'user', content: 'Thanks!' },
+ ]);
+ });
- const result = convertToCoreMessages(
- [
+ it('should handle a conversation with multiple messages (parts)', () => {
+ const result = convertToCoreMessages([
+ {
+ role: 'user',
+ content: "What's the weather like?",
+ parts: [{ type: 'text', text: "What's the weather like?" }],
+ },
{
role: 'assistant',
- content: 'response',
- toolInvocations: [
- {
- state: 'result',
- toolCallId: 'call-1',
- toolName: 'screenshot',
- args: { value: 'value-1' },
- result: 'result-1',
- step: 0,
- },
- {
- state: 'result',
- toolCallId: 'call-2',
- toolName: 'screenshot',
- args: { value: 'value-2' },
- result: 'result-2',
- step: 1,
- },
-
- {
- state: 'result',
- toolCallId: 'call-3',
- toolName: 'screenshot',
- args: { value: 'value-3' },
- result: 'result-3',
- step: 1,
- },
- {
- state: 'result',
- toolCallId: 'call-4',
- toolName: 'screenshot',
- args: { value: 'value-4' },
- result: 'result-4',
- step: 2,
- },
- ],
+ content: '',
+ parts: [{ type: 'text', text: "I'll check that for you." }],
},
{
role: 'user',
content: 'Thanks!',
+ parts: [{ type: 'text', text: 'Thanks!' }],
},
- ],
- { tools }, // separate tools to ensure that types are inferred correctly
- );
-
- expect(result).toEqual([
- {
- role: 'assistant',
- content: [
- {
- type: 'tool-call',
- toolCallId: 'call-1',
- toolName: 'screenshot',
- args: { value: 'value-1' },
- },
- ],
- },
- {
- role: 'tool',
- content: [
- {
- type: 'tool-result',
- toolCallId: 'call-1',
- toolName: 'screenshot',
- result: 'result-1',
- },
- ],
- },
- {
- role: 'assistant',
- content: [
- {
- type: 'tool-call',
- toolCallId: 'call-2',
- toolName: 'screenshot',
- args: { value: 'value-2' },
- },
- {
- type: 'tool-call',
- toolCallId: 'call-3',
- toolName: 'screenshot',
- args: { value: 'value-3' },
- },
- ],
- },
- {
- role: 'tool',
- content: [
+ ]);
+
+ expect(result).toEqual([
+ { role: 'user', content: "What's the weather like?" },
+ {
+ role: 'assistant',
+ content: [{ type: 'text', text: "I'll check that for you." }],
+ },
+ { role: 'user', content: 'Thanks!' },
+ ]);
+ });
+
+ it('should convert fully typed Message[]', () => {
+ const messages: Message[] = [
+ {
+ id: '1',
+ role: 'user',
+ content: 'What is the weather in Tokyo?',
+ },
+ {
+ id: '2',
+ role: 'assistant',
+ content: 'It is sunny in Tokyo.',
+ },
+ ];
+
+ const result = convertToCoreMessages(messages);
+
+ expect(result).toMatchSnapshot();
+ });
+
+ it('should handle conversation with multiple tool invocations and user message at the end', () => {
+ const tools = {
+ screenshot: tool({
+ parameters: z.object({ value: z.string() }),
+ execute: async () => 'imgbase64',
+ }),
+ };
+
+ const result = convertToCoreMessages(
+ [
{
- type: 'tool-result',
- toolCallId: 'call-2',
- toolName: 'screenshot',
- result: 'result-2',
+ role: 'assistant',
+ content: 'response',
+ toolInvocations: [
+ {
+ state: 'result',
+ toolCallId: 'call-1',
+ toolName: 'screenshot',
+ args: { value: 'value-1' },
+ result: 'result-1',
+ step: 0,
+ },
+ {
+ state: 'result',
+ toolCallId: 'call-2',
+ toolName: 'screenshot',
+ args: { value: 'value-2' },
+ result: 'result-2',
+ step: 1,
+ },
+
+ {
+ state: 'result',
+ toolCallId: 'call-3',
+ toolName: 'screenshot',
+ args: { value: 'value-3' },
+ result: 'result-3',
+ step: 1,
+ },
+ {
+ state: 'result',
+ toolCallId: 'call-4',
+ toolName: 'screenshot',
+ args: { value: 'value-4' },
+ result: 'result-4',
+ step: 2,
+ },
+ ],
},
{
- type: 'tool-result',
- toolCallId: 'call-3',
- toolName: 'screenshot',
- result: 'result-3',
+ role: 'user',
+ content: 'Thanks!',
},
],
- },
- {
- role: 'assistant',
- content: [
+ { tools }, // separate tools to ensure that types are inferred correctly
+ );
+
+ expect(result).toMatchSnapshot();
+ });
+
+ it('should handle conversation with multiple tool invocations and user message at the end (parts)', () => {
+ const tools = {
+ screenshot: tool({
+ parameters: z.object({ value: z.string() }),
+ execute: async () => 'imgbase64',
+ }),
+ };
+
+ const result = convertToCoreMessages(
+ [
{
- type: 'tool-call',
- toolCallId: 'call-4',
- toolName: 'screenshot',
- args: { value: 'value-4' },
+ role: 'assistant',
+ content: '',
+ toolInvocations: [],
+ parts: [
+ {
+ type: 'tool-invocation',
+ toolInvocation: {
+ state: 'result',
+ toolCallId: 'call-1',
+ toolName: 'screenshot',
+ args: { value: 'value-1' },
+ result: 'result-1',
+ step: 0,
+ },
+ },
+ {
+ type: 'tool-invocation',
+ toolInvocation: {
+ state: 'result',
+ toolCallId: 'call-2',
+ toolName: 'screenshot',
+ args: { value: 'value-2' },
+ result: 'result-2',
+ step: 1,
+ },
+ },
+
+ {
+ type: 'tool-invocation',
+ toolInvocation: {
+ state: 'result',
+ toolCallId: 'call-3',
+ toolName: 'screenshot',
+ args: { value: 'value-3' },
+ result: 'result-3',
+ step: 1,
+ },
+ },
+ {
+ type: 'tool-invocation',
+ toolInvocation: {
+ state: 'result',
+ toolCallId: 'call-4',
+ toolName: 'screenshot',
+ args: { value: 'value-4' },
+ result: 'result-4',
+ step: 2,
+ },
+ },
+ { type: 'text', text: 'response' },
+ ],
},
- ],
- },
- {
- role: 'tool',
- content: [
{
- type: 'tool-result',
- toolCallId: 'call-4',
- toolName: 'screenshot',
- result: 'result-4',
+ role: 'user',
+ content: 'Thanks!',
+ parts: [{ type: 'text', text: 'Thanks!' }],
},
],
- },
- {
- role: 'assistant',
- content: 'response',
- },
- {
- role: 'user',
- content: 'Thanks!',
- },
- ]);
+ { tools }, // separate tools to ensure that types are inferred correctly
+ );
+
+ expect(result).toMatchSnapshot();
+ });
});
-});
-describe('error handling', () => {
- it('should throw an error for unhandled roles', () => {
- expect(() => {
- convertToCoreMessages([
- { role: 'unknown' as any, content: 'unknown role message' },
- ]);
- }).toThrow('Unsupported role: unknown');
+ describe('error handling', () => {
+ it('should throw an error for unhandled roles', () => {
+ expect(() => {
+ convertToCoreMessages([
+ { role: 'unknown' as any, content: 'unknown role message' },
+ ]);
+ }).toThrow('Unsupported role: unknown');
+ });
});
});
diff --git a/packages/ai/core/prompt/convert-to-core-messages.ts b/packages/ai/core/prompt/convert-to-core-messages.ts
index 090d0447d572..190b2146ea60 100644
--- a/packages/ai/core/prompt/convert-to-core-messages.ts
+++ b/packages/ai/core/prompt/convert-to-core-messages.ts
@@ -1,15 +1,15 @@
+import { Message, TextUIPart, ToolInvocationUIPart } from '@ai-sdk/ui-utils';
import { ToolSet } from '../generate-text/tool-set';
import { CoreMessage, ToolCallPart, ToolResultPart } from '../prompt';
import { attachmentsToParts } from './attachments-to-parts';
import { MessageConversionError } from './message-conversion-error';
-import { UIMessage } from './ui-message';
/**
Converts an array of messages from useChat into an array of CoreMessages that can be used
with the AI core functions (e.g. `streamText`).
*/
export function convertToCoreMessages<TOOLS extends ToolSet = never>(
- messages: Array<UIMessage>,
+ messages: Array<Omit<Message, 'id'>>,
options?: { tools?: TOOLS },
) {
const tools = options?.tools ?? ({} as TOOLS);
@@ -18,8 +18,7 @@ export function convertToCoreMessages(
for (let i = 0; i < messages.length; i++) {
const message = messages[i];
const isLastMessage = i === messages.length - 1;
- const { role, content, toolInvocations, experimental_attachments } =
- message;
+ const { role, content, experimental_attachments } = message;
switch (role) {
case 'system': {
@@ -44,6 +43,116 @@ export function convertToCoreMessages(
}
case 'assistant': {
+ if (message.parts != null) {
+ let currentStep = 0;
+ let blockHasToolInvocations = false;
+ let block: Array<TextUIPart | ToolInvocationUIPart> = [];
+
+ function processBlock() {
+ coreMessages.push({
+ role: 'assistant',
+ content: block.map(part => {
+ switch (part.type) {
+ case 'text':
+ return {
+ type: 'text' as const,
+ text: part.text,
+ };
+ default:
+ return {
+ type: 'tool-call' as const,
+ toolCallId: part.toolInvocation.toolCallId,
+ toolName: part.toolInvocation.toolName,
+ args: part.toolInvocation.args,
+ };
+ }
+ }),
+ });
+
+ // check if there are tool invocations with results in the block
+ const stepInvocations = block
+ .filter(
+ (
+ part: TextUIPart | ToolInvocationUIPart,
+ ): part is ToolInvocationUIPart =>
+ part.type === 'tool-invocation',
+ )
+ .map(part => part.toolInvocation);
+
+ // tool message with tool results
+ if (stepInvocations.length > 0) {
+ coreMessages.push({
+ role: 'tool',
+ content: stepInvocations.map(
+ (toolInvocation): ToolResultPart => {
+ if (!('result' in toolInvocation)) {
+ throw new MessageConversionError({
+ originalMessage: message,
+ message:
+ 'ToolInvocation must have a result: ' +
+ JSON.stringify(toolInvocation),
+ });
+ }
+
+ const { toolCallId, toolName, result } = toolInvocation;
+
+ const tool = tools[toolName];
+ return tool?.experimental_toToolResultContent != null
+ ? {
+ type: 'tool-result',
+ toolCallId,
+ toolName,
+ result: tool.experimental_toToolResultContent(result),
+ experimental_content:
+ tool.experimental_toToolResultContent(result),
+ }
+ : {
+ type: 'tool-result',
+ toolCallId,
+ toolName,
+ result,
+ };
+ },
+ ),
+ });
+ }
+
+ // updates for next block
+ block = [];
+ blockHasToolInvocations = false;
+ currentStep++;
+ }
+
+ for (const part of message.parts) {
+ switch (part.type) {
+ case 'reasoning':
+ // reasoning is not sent back to the LLM
+ break;
+ case 'text': {
+ if (blockHasToolInvocations) {
+ processBlock(); // text must come before tool invocations
+ }
+ block.push(part);
+ break;
+ }
+ case 'tool-invocation': {
+ if ((part.toolInvocation.step ?? 0) !== currentStep) {
+ processBlock();
+ }
+ block.push(part);
+ blockHasToolInvocations = true;
+ break;
+ }
+ }
+ }
+
+ processBlock();
+
+ break;
+ }
+
+ const toolInvocations = message.toolInvocations;
+
if (toolInvocations == null || toolInvocations.length === 0) {
coreMessages.push({ role: 'assistant', content });
break;
diff --git a/packages/ai/core/prompt/detect-prompt-type.test.ts b/packages/ai/core/prompt/detect-prompt-type.test.ts
index aa75dcccd40c..6f6e60752e36 100644
--- a/packages/ai/core/prompt/detect-prompt-type.test.ts
+++ b/packages/ai/core/prompt/detect-prompt-type.test.ts
@@ -1,5 +1,5 @@
+import { Message } from '@ai-sdk/ui-utils';
import { detectPromptType } from './detect-prompt-type';
-import type { UIMessage } from './ui-message';
import type { CoreMessage } from './message';
it('should return "other" for invalid inputs', () => {
@@ -13,7 +13,7 @@ it('should return "messages" for empty arrays', () => {
});
it('should detect UI messages with data role', () => {
- const messages: UIMessage[] = [
+ const messages: Omit<Message, 'id'>[] = [
{
role: 'data',
content: 'some data',
@@ -23,7 +23,7 @@ it('should detect UI messages with data role', () => {
});
it('should detect UI messages with toolInvocations', () => {
- const messages: UIMessage[] = [
+ const messages: Omit<Message, 'id'>[] = [
{
role: 'assistant',
content: 'Hello',
@@ -42,7 +42,7 @@ it('should detect UI messages with toolInvocations', () => {
});
it('should detect UI messages with experimental_attachments', () => {
- const messages: UIMessage[] = [
+ const messages: Omit<Message, 'id'>[] = [
{
role: 'user',
content: 'Check this file',
diff --git a/packages/ai/core/prompt/message-conversion-error.ts b/packages/ai/core/prompt/message-conversion-error.ts
index 96b5fc039d7a..6472b828b35b 100644
--- a/packages/ai/core/prompt/message-conversion-error.ts
+++ b/packages/ai/core/prompt/message-conversion-error.ts
@@ -1,5 +1,5 @@
import { AISDKError } from '@ai-sdk/provider';
-import { UIMessage } from './ui-message';
+import { Message } from '@ai-sdk/ui-utils';
const name = 'AI_MessageConversionError';
const marker = `vercel.ai.error.${name}`;
@@ -8,13 +8,13 @@ const symbol = Symbol.for(marker);
export class MessageConversionError extends AISDKError {
private readonly [symbol] = true; // used in isInstance
- readonly originalMessage: UIMessage;
+ readonly originalMessage: Omit<Message, 'id'>;
constructor({
originalMessage,
message,
}: {
- originalMessage: UIMessage;
+ originalMessage: Omit<Message, 'id'>;
message: string;
}) {
super({ name, message });
diff --git a/packages/ai/core/prompt/prompt.ts b/packages/ai/core/prompt/prompt.ts
index dd0bfc111aea..5688b1609f23 100644
--- a/packages/ai/core/prompt/prompt.ts
+++ b/packages/ai/core/prompt/prompt.ts
@@ -1,5 +1,5 @@
+import { Message } from '@ai-sdk/ui-utils';
import { CoreMessage } from './message';
-import { UIMessage } from './ui-message';
/**
Prompt part of the AI function options.
@@ -19,5 +19,5 @@ A simple text prompt. You can either use `prompt` or `messages` but not both.
/**
A list of messages. You can either use `prompt` or `messages` but not both.
*/
- messages?: Array<CoreMessage> | Array<UIMessage>;
+ messages?: Array<CoreMessage> | Array<Omit<Message, 'id'>>;
};
diff --git a/packages/ai/core/prompt/standardize-prompt.ts b/packages/ai/core/prompt/standardize-prompt.ts
index 1110b0180b4e..bac55d20bcf6 100644
--- a/packages/ai/core/prompt/standardize-prompt.ts
+++ b/packages/ai/core/prompt/standardize-prompt.ts
@@ -1,12 +1,12 @@
import { InvalidPromptError } from '@ai-sdk/provider';
import { safeValidateTypes } from '@ai-sdk/provider-utils';
+import { Message } from '@ai-sdk/ui-utils';
import { z } from 'zod';
import { ToolSet } from '../generate-text/tool-set';
import { convertToCoreMessages } from './convert-to-core-messages';
import { detectPromptType } from './detect-prompt-type';
import { CoreMessage, coreMessageSchema } from './message';
import { Prompt } from './prompt';
-import { UIMessage } from './ui-message';
export type StandardizedPrompt = {
/**
@@ -90,7 +90,7 @@ export function standardizePrompt({
const messages: CoreMessage[] =
promptType === 'ui-messages'
- ? convertToCoreMessages(prompt.messages as UIMessage[], {
+ ? convertToCoreMessages(prompt.messages as Omit[], {
tools,
})
: (prompt.messages as CoreMessage[]);
diff --git a/packages/ai/core/prompt/ui-message.ts b/packages/ai/core/prompt/ui-message.ts
deleted file mode 100644
index 4bd1bd511eba..000000000000
--- a/packages/ai/core/prompt/ui-message.ts
+++ /dev/null
@@ -1,10 +0,0 @@
-import { Attachment, ToolInvocation } from '@ai-sdk/ui-utils';
-
-// only for internal use - should be removed when we fully migrate to core messages
-export type UIMessage = {
- role: 'system' | 'user' | 'assistant' | 'data';
-
- content: string;
- toolInvocations?: ToolInvocation[];
- experimental_attachments?: Attachment[];
-};
diff --git a/packages/react/src/use-assistant.ts b/packages/react/src/use-assistant.ts
index 0af3802635e1..0436adedc69f 100644
--- a/packages/react/src/use-assistant.ts
+++ b/packages/react/src/use-assistant.ts
@@ -185,6 +185,7 @@ export function useAssistant({
id: value.id,
role: value.role,
content: value.content[0].text.value,
+ parts: [],
},
]);
},
@@ -198,6 +199,7 @@ export function useAssistant({
id: lastMessage.id,
role: lastMessage.role,
content: lastMessage.content + value,
+ parts: lastMessage.parts,
},
];
});
@@ -220,6 +222,7 @@ export function useAssistant({
role: 'data',
content: '',
data: value.data,
+ parts: [],
},
]);
},
@@ -257,7 +260,7 @@ export function useAssistant({
return;
}
- append({ role: 'user', content: input }, requestOptions);
+ append({ role: 'user', content: input, parts: [] }, requestOptions);
};
const setThreadId = (threadId: string | undefined) => {
diff --git a/packages/react/src/use-chat.ts b/packages/react/src/use-chat.ts
index 311f16e4f120..27f7d25efdde 100644
--- a/packages/react/src/use-chat.ts
+++ b/packages/react/src/use-chat.ts
@@ -4,13 +4,19 @@ import type {
CreateMessage,
JSONValue,
Message,
+ UIMessage,
UseChatOptions,
} from '@ai-sdk/ui-utils';
import {
callChatApi,
extractMaxToolInvocationStep,
+ fillMessageParts,
generateId as generateIdFunc,
+ getMessageParts,
+ isAssistantMessageWithCompletedToolCalls,
prepareAttachmentsForRequest,
+ shouldResubmitMessages,
+ updateToolCallResult,
} from '@ai-sdk/ui-utils';
import { useCallback, useEffect, useRef, useState } from 'react';
import useSWR from 'swr';
@@ -20,7 +26,7 @@ export type { CreateMessage, Message, UseChatOptions };
export type UseChatHelpers = {
/** Current messages in the chat */
- messages: Message[];
+ messages: UIMessage[];
/** The error object of the API request */
error: undefined | Error;
/**
@@ -162,14 +168,19 @@ By default, it's set to 1, which means that only a single LLM call is made.
const [initialMessagesFallback] = useState([]);
// Store the chat state in SWR, using the chatId as the key to share states.
- const { data: messages, mutate } = useSWR<Message[]>(
+ const { data: messages, mutate } = useSWR<UIMessage[]>(
[chatKey, 'messages'],
null,
- { fallbackData: initialMessages ?? initialMessagesFallback },
+ {
+ fallbackData:
+ initialMessages != null
+ ? fillMessageParts(initialMessages)
+ : initialMessagesFallback,
+ },
);
// Keep the latest messages in a ref.
- const messagesRef = useRef<Message[]>(messages || []);
+ const messagesRef = useRef<UIMessage[]>(messages || []);
useEffect(() => {
messagesRef.current = messages || [];
}, [messages]);
@@ -214,9 +225,11 @@ By default, it's set to 1, which means that only a single LLM call is made.
const triggerRequest = useCallback(
async (chatRequest: ChatRequest) => {
- const messageCount = chatRequest.messages.length;
+ const chatMessages = fillMessageParts(chatRequest.messages);
+
+ const messageCount = chatMessages.length;
const maxStep = extractMaxToolInvocationStep(
- chatRequest.messages[chatRequest.messages.length - 1]?.toolInvocations,
+ chatMessages[chatMessages.length - 1]?.toolInvocations,
);
try {
@@ -234,11 +247,11 @@ By default, it's set to 1, which means that only a single LLM call is made.
// Do an optimistic update to the chat state to show the updated messages immediately:
const previousMessages = messagesRef.current;
- throttledMutate(chatRequest.messages, false);
+ throttledMutate(chatMessages, false);
const constructedMessagesPayload = sendExtraMessageFields
- ? chatRequest.messages
- : chatRequest.messages.map(
+ ? chatMessages
+ : chatMessages.map(
({
role,
content,
@@ -246,6 +259,7 @@ By default, it's set to 1, which means that only a single LLM call is made.
data,
annotations,
toolInvocations,
+ parts,
}) => ({
role,
content,
@@ -255,6 +269,7 @@ By default, it's set to 1, which means that only a single LLM call is made.
...(data !== undefined && { data }),
...(annotations !== undefined && { annotations }),
...(toolInvocations !== undefined && { toolInvocations }),
+ ...(parts !== undefined && { parts }),
}),
);
@@ -264,7 +279,7 @@ By default, it's set to 1, which means that only a single LLM call is made.
api,
body: experimental_prepareRequestBody?.({
id: chatId,
- messages: chatRequest.messages,
+ messages: chatMessages,
requestData: chatRequest.data,
requestBody: chatRequest.body,
}) ?? {
@@ -291,11 +306,8 @@ By default, it's set to 1, which means that only a single LLM call is made.
throttledMutate(
[
...(replaceLastMessage
- ? chatRequest.messages.slice(
- 0,
- chatRequest.messages.length - 1,
- )
- : chatRequest.messages),
+ ? chatMessages.slice(0, chatMessages.length - 1)
+ : chatMessages),
message,
],
false,
@@ -312,7 +324,7 @@ By default, it's set to 1, which means that only a single LLM call is made.
onFinish,
generateId,
fetch,
- lastMessage: chatRequest.messages[chatRequest.messages.length - 1],
+ lastMessage: chatMessages[chatMessages.length - 1],
});
abortControllerRef.current = null;
@@ -335,23 +347,13 @@ By default, it's set to 1, which means that only a single LLM call is made.
// auto-submit when all tool calls in the last assistant message have results
// and assistant has not answered yet
const messages = messagesRef.current;
- const lastMessage = messages[messages.length - 1];
if (
- // ensure there is a last message:
- lastMessage != null &&
- // ensure we actually have new steps (to prevent infinite loops in case of errors):
- (messages.length > messageCount ||
- extractMaxToolInvocationStep(lastMessage.toolInvocations) !==
- maxStep) &&
- // check if the feature is enabled:
- maxSteps > 1 &&
- // check that next step is possible:
- isAssistantMessageWithCompletedToolCalls(lastMessage) &&
- // check that assistant has not answered yet:
- !lastMessage.content && // empty string or undefined
- // limit the number of automatic steps:
- (extractMaxToolInvocationStep(lastMessage.toolInvocations) ?? 0) <
- maxSteps
+ shouldResubmitMessages({
+ originalMaxToolInvocationStep: maxStep,
+ originalMessageCount: messageCount,
+ maxSteps,
+ messages,
+ })
) {
await triggerRequest({ messages });
}
@@ -402,6 +404,7 @@ By default, it's set to 1, which means that only a single LLM call is made.
createdAt: message.createdAt ?? new Date(),
experimental_attachments:
attachmentsForRequest.length > 0 ? attachmentsForRequest : undefined,
+ parts: getMessageParts(message),
});
return triggerRequest({ messages, headers, body, data });
@@ -443,8 +446,9 @@ By default, it's set to 1, which means that only a single LLM call is made.
messages = messages(messagesRef.current);
}
- mutate(messages, false);
- messagesRef.current = messages;
+ const messagesWithParts = fillMessageParts(messages);
+ mutate(messagesWithParts, false);
+ messagesRef.current = messagesWithParts;
},
[mutate],
);
@@ -497,6 +501,7 @@ By default, it's set to 1, which means that only a single LLM call is made.
content: input,
experimental_attachments:
attachmentsForRequest.length > 0 ? attachmentsForRequest : undefined,
+ parts: [{ type: 'text', text: input }],
});
const chatRequest: ChatRequest = {
@@ -518,40 +523,28 @@ By default, it's set to 1, which means that only a single LLM call is made.
};
const addToolResult = useCallback(
- ({ toolCallId, result }: { toolCallId: string; result: any }) => {
- const updatedMessages = messagesRef.current.map((message, index, arr) =>
- // update the tool calls in the last assistant message:
- index === arr.length - 1 &&
- message.role === 'assistant' &&
- message.toolInvocations
- ? {
- ...message,
- toolInvocations: message.toolInvocations.map(toolInvocation =>
- toolInvocation.toolCallId === toolCallId
- ? {
- ...toolInvocation,
- result,
- state: 'result' as const,
- }
- : toolInvocation,
- ),
- }
- : message,
- );
+ ({ toolCallId, result }: { toolCallId: string; result: unknown }) => {
+ const currentMessages = messagesRef.current;
+
+ updateToolCallResult({
+ messages: currentMessages,
+ toolCallId,
+ toolResult: result,
+ });
- mutate(updatedMessages, false);
+ mutate(currentMessages, false);
// auto-submit when all tool calls in the last assistant message have results:
- const lastMessage = updatedMessages[updatedMessages.length - 1];
+ const lastMessage = currentMessages[currentMessages.length - 1];
if (isAssistantMessageWithCompletedToolCalls(lastMessage)) {
- triggerRequest({ messages: updatedMessages });
+ triggerRequest({ messages: currentMessages });
}
},
[mutate, triggerRequest],
);
return {
- messages: messages || [],
+ messages: messages ?? [],
id: chatId,
setMessages,
data: streamData,
@@ -568,21 +561,3 @@ By default, it's set to 1, which means that only a single LLM call is made.
addToolResult,
};
}
-
-/**
-Check if the message is an assistant message with completed tool calls.
-The message must have at least one tool invocation and all tool invocations
-must have a result.
- */
-function isAssistantMessageWithCompletedToolCalls(
- message: Message,
-): message is Message & {
- role: 'assistant';
-} {
- return (
- message.role === 'assistant' &&
- message.toolInvocations != null &&
- message.toolInvocations.length > 0 &&
- message.toolInvocations.every(toolInvocation => 'result' in toolInvocation)
- );
-}
diff --git a/packages/react/src/use-chat.ui.test.tsx b/packages/react/src/use-chat.ui.test.tsx
index 4c85389fd073..5f6c94571359 100644
--- a/packages/react/src/use-chat.ui.test.tsx
+++ b/packages/react/src/use-chat.ui.test.tsx
@@ -271,6 +271,7 @@ describe('data protocol stream', () => {
createdAt: expect.any(Date),
role: 'assistant',
content: 'Hello, world.',
+ parts: [{ text: 'Hello, world.', type: 'text' }],
},
options: {
finishReason: 'stop',
@@ -300,7 +301,13 @@ describe('data protocol stream', () => {
expect(await call(0).getRequestBodyJson()).toStrictEqual({
id: screen.getByTestId('id').textContent,
- messages: [{ role: 'user', content: 'hi' }],
+ messages: [
+ {
+ role: 'user',
+ content: 'hi',
+ parts: [{ text: 'hi', type: 'text' }],
+ },
+ ],
});
},
),
@@ -453,6 +460,7 @@ describe('text stream', () => {
createdAt: expect.any(Date),
role: 'assistant',
content: 'Hello, world.',
+ parts: [{ text: 'Hello, world.', type: 'text' }],
},
options: {
finishReason: 'unknown',
@@ -714,6 +722,7 @@ describe('prepareRequestBody', () => {
id: expect.any(String),
experimental_attachments: undefined,
createdAt: expect.any(Date),
+ parts: [{ type: 'text', text: 'hi' }],
},
],
requestData: { 'test-data-key': 'test-data-value' },
@@ -1301,6 +1310,7 @@ describe('file attachments with data url', () => {
url: 'data:text/plain;base64,dGVzdCBmaWxlIGNvbnRlbnQ=',
},
],
+ parts: [{ text: 'Message with text attachment', type: 'text' }],
},
],
});
@@ -1359,6 +1369,7 @@ describe('file attachments with data url', () => {
url: 'data:image/png;base64,dGVzdCBpbWFnZSBjb250ZW50',
},
],
+ parts: [{ text: 'Message with image attachment', type: 'text' }],
},
],
});
@@ -1483,6 +1494,7 @@ describe('file attachments with url', () => {
url: 'https://example.com/image.png',
},
],
+ parts: [{ text: 'Message with image attachment', type: 'text' }],
},
],
});
@@ -1581,6 +1593,7 @@ describe('attachments with empty submit', () => {
url: 'https://example.com/image.png',
},
],
+ parts: [{ text: '', type: 'text' }],
},
],
});
@@ -1688,6 +1701,7 @@ describe('should append message with attachments', () => {
url: 'https://example.com/image.png',
},
],
+ parts: [{ text: 'Message with image attachment', type: 'text' }],
},
],
});
@@ -1767,7 +1781,13 @@ describe('reload', () => {
expect(await call(1).getRequestBodyJson()).toStrictEqual({
id: expect.any(String),
- messages: [{ content: 'hi', role: 'user' }],
+ messages: [
+ {
+ content: 'hi',
+ role: 'user',
+ parts: [{ text: 'hi', type: 'text' }],
+ },
+ ],
data: { 'test-data-key': 'test-data-value' },
'request-body-key': 'request-body-value',
});
@@ -1845,6 +1865,7 @@ describe('test sending additional fields during message submission', () => {
role: 'user',
content: 'hi',
annotations: ['this is an annotation'],
+ parts: [{ text: 'hi', type: 'text' }],
},
],
});
diff --git a/packages/solid/src/use-assistant.ts b/packages/solid/src/use-assistant.ts
index 829e56880bd3..9705dcdf99a8 100644
--- a/packages/solid/src/use-assistant.ts
+++ b/packages/solid/src/use-assistant.ts
@@ -2,19 +2,14 @@ import { isAbortError } from '@ai-sdk/provider-utils';
import {
AssistantStatus,
CreateMessage,
- Message,
- UseAssistantOptions,
generateId,
+ Message,
processAssistantStream,
+ UseAssistantOptions,
} from '@ai-sdk/ui-utils';
import { Accessor, createMemo, createSignal, JSX, Setter } from 'solid-js';
+import { createStore, SetStoreFunction, Store } from 'solid-js/store';
import { convertToAccessorOptions } from './utils/convert-to-accessor-options';
-import {
- createStore,
- SetStoreFunction,
- Store,
- StoreSetter,
-} from 'solid-js/store';
// use function to allow for mocking in tests:
const getOriginalFetch = () => fetch;
@@ -189,6 +184,7 @@ export function useAssistant(
id: value.id,
role: value.role,
content: value.content[0].text.value,
+ parts: [],
},
]);
},
@@ -202,6 +198,7 @@ export function useAssistant(
id: lastMessage.id,
role: lastMessage.role,
content: lastMessage.content + value,
+ parts: lastMessage.parts,
},
];
});
@@ -224,6 +221,7 @@ export function useAssistant(
role: 'data',
content: '',
data: value.data,
+ parts: [],
},
]);
},
@@ -262,7 +260,7 @@ export function useAssistant(
return;
}
- append({ role: 'user', content: input() }, requestOptions);
+ append({ role: 'user', content: input(), parts: [] }, requestOptions);
};
const setThreadId = (threadId: string | undefined) => {
diff --git a/packages/solid/src/use-chat.ts b/packages/solid/src/use-chat.ts
index 86c29ee56eb8..d50f01841a8f 100644
--- a/packages/solid/src/use-chat.ts
+++ b/packages/solid/src/use-chat.ts
@@ -7,12 +7,18 @@ import type {
JSONValue,
Message,
UseChatOptions as SharedUseChatOptions,
+ UIMessage,
} from '@ai-sdk/ui-utils';
import {
callChatApi,
extractMaxToolInvocationStep,
+ fillMessageParts,
generateId as generateIdFunc,
+ getMessageParts,
+ isAssistantMessageWithCompletedToolCalls,
prepareAttachmentsForRequest,
+ shouldResubmitMessages,
+ updateToolCallResult,
} from '@ai-sdk/ui-utils';
import {
Accessor,
@@ -32,7 +38,7 @@ export type UseChatHelpers = {
/**
* Current messages in the chat as a SolidJS store.
*/
- messages: () => Store<Message[]>;
+ messages: () => Store<UIMessage[]>;
/** The error object of the API request */
error: Accessor;
@@ -114,11 +120,11 @@ or to provide a custom fetch implementation for e.g. testing.
const processStreamedResponse = async (
api: string,
chatRequest: ChatRequest,
- mutate: (data: Message[]) => void,
+ mutate: (data: UIMessage[]) => void,
setStreamData: Setter<JSONValue[] | undefined>,
streamData: Accessor<JSONValue[] | undefined>,
extraMetadata: any,
- messagesRef: Message[],
+ messagesRef: UIMessage[],
abortController: AbortController | null,
generateId: IdGenerator,
streamProtocol: UseChatOptions['streamProtocol'] = 'data',
@@ -133,14 +139,15 @@ const processStreamedResponse = async (
// Do an optimistic update to the chat state to show the updated messages
// immediately.
const previousMessages = messagesRef;
+ const chatMessages = fillMessageParts(chatRequest.messages);
- mutate(chatRequest.messages);
+ mutate(chatMessages);
const existingStreamData = streamData() ?? [];
const constructedMessagesPayload = sendExtraMessageFields
- ? chatRequest.messages
- : chatRequest.messages.map(
+ ? chatMessages
+ : chatMessages.map(
({
role,
content,
@@ -148,6 +155,7 @@ const processStreamedResponse = async (
data,
annotations,
toolInvocations,
+ parts,
}) => ({
role,
content,
@@ -157,6 +165,7 @@ const processStreamedResponse = async (
...(data !== undefined && { data }),
...(annotations !== undefined && { annotations }),
...(toolInvocations !== undefined && { toolInvocations }),
+ ...(parts !== undefined && { parts }),
}),
);
@@ -185,8 +194,8 @@ const processStreamedResponse = async (
onUpdate({ message, data, replaceLastMessage }) {
mutate([
...(replaceLastMessage
- ? chatRequest.messages.slice(0, chatRequest.messages.length - 1)
- : chatRequest.messages),
+ ? chatMessages.slice(0, chatMessages.length - 1)
+ : chatMessages),
message,
]);
@@ -198,7 +207,7 @@ const processStreamedResponse = async (
onFinish,
generateId,
fetch,
- lastMessage: chatRequest.messages[chatRequest.messages.length - 1],
+ lastMessage: chatMessages[chatMessages.length - 1],
});
};
@@ -234,12 +243,14 @@ export function useChat(
chatCache.get(chatKey()) ?? useChatOptions().initialMessages?.() ?? [],
);
- const [messagesStore, setMessagesStore] = createStore(_messages());
+ const [messagesStore, setMessagesStore] = createStore(
+ fillMessageParts(_messages()),
+ );
createEffect(() => {
- setMessagesStore(reconcile(_messages(), { merge: true }));
+ setMessagesStore(reconcile(fillMessageParts(_messages()), { merge: true }));
});
- const mutate = (messages: Message[]) => {
+ const mutate = (messages: UIMessage[]) => {
chatCache.set(chatKey(), messages);
};
@@ -249,9 +260,9 @@ export function useChat(
);
const [isLoading, setIsLoading] = createSignal(false);
- let messagesRef: Message[] = _messages() || [];
+ let messagesRef: UIMessage[] = fillMessageParts(_messages()) || [];
createEffect(() => {
- messagesRef = _messages() || [];
+ messagesRef = fillMessageParts(_messages()) || [];
});
let abortController: AbortController | null = null;
@@ -323,23 +334,13 @@ export function useChat(
// auto-submit when all tool calls in the last assistant message have results:
const messages = messagesRef;
- const lastMessage = messages[messages.length - 1];
if (
- // ensure there is a last message:
- lastMessage != null &&
- // ensure we actually have new steps (to prevent infinite loops in case of errors):
- (messages.length > messageCount ||
- extractMaxToolInvocationStep(lastMessage.toolInvocations) !==
- maxStep) &&
- // check if the feature is enabled:
- maxSteps > 1 &&
- // check that next step is possible:
- isAssistantMessageWithCompletedToolCalls(lastMessage) &&
- // check that assistant has not answered yet:
- !lastMessage.content && // empty string or undefined
- // limit the number of automatic steps:
- (extractMaxToolInvocationStep(lastMessage.toolInvocations) ?? 0) <
- maxSteps
+ shouldResubmitMessages({
+ originalMaxToolInvocationStep: maxStep,
+ originalMessageCount: messageCount,
+ maxSteps,
+ messages,
+ })
) {
await triggerRequest({ messages });
}
@@ -353,15 +354,17 @@ export function useChat(
experimental_attachments,
);
- const newMessage = {
+ const messages = messagesRef.concat({
...message,
id: message.id ?? generateId()(),
+ createdAt: message.createdAt ?? new Date(),
experimental_attachments:
attachmentsForRequest.length > 0 ? attachmentsForRequest : undefined,
- };
+ parts: getMessageParts(message),
+ });
return triggerRequest({
- messages: messagesRef.concat(newMessage as Message),
+ messages,
headers,
body,
data,
@@ -404,8 +407,9 @@ export function useChat(
messagesArg = messagesArg(messagesRef);
}
- mutate(messagesArg);
- messagesRef = messagesArg;
+ const messagesWithParts = fillMessageParts(messagesArg);
+ mutate(messagesWithParts);
+ messagesRef = messagesWithParts;
};
const setData = (
@@ -454,6 +458,7 @@ export function useChat(
createdAt: new Date(),
experimental_attachments:
attachmentsForRequest.length > 0 ? attachmentsForRequest : undefined,
+ parts: [{ type: 'text', text: inputValue }],
}),
headers: options.headers,
body: options.body,
@@ -474,34 +479,20 @@ export function useChat(
toolCallId: string;
result: any;
}) => {
- const messagesSnapshot = _messages() ?? [];
-
- const updatedMessages = messagesSnapshot.map((message, index, arr) =>
- // update the tool calls in the last assistant message:
- index === arr.length - 1 &&
- message.role === 'assistant' &&
- message.toolInvocations
- ? {
- ...message,
- toolInvocations: message.toolInvocations.map(toolInvocation =>
- toolInvocation.toolCallId === toolCallId
- ? {
- ...toolInvocation,
- result,
- state: 'result' as const,
- }
- : toolInvocation,
- ),
- }
- : message,
- );
+ const currentMessages = messagesRef ?? [];
- mutate(updatedMessages);
+ updateToolCallResult({
+ messages: currentMessages,
+ toolCallId,
+ toolResult: result,
+ });
+
+ mutate(currentMessages);
// auto-submit when all tool calls in the last assistant message have results:
- const lastMessage = updatedMessages[updatedMessages.length - 1];
+ const lastMessage = currentMessages[currentMessages.length - 1];
if (isAssistantMessageWithCompletedToolCalls(lastMessage)) {
- triggerRequest({ messages: updatedMessages });
+ triggerRequest({ messages: currentMessages });
}
};
@@ -524,17 +515,3 @@ export function useChat(
addToolResult,
};
}
-
-/**
-Check if the message is an assistant message with completed tool calls.
-The message must have at least one tool invocation and all tool invocations
-must have a result.
- */
-function isAssistantMessageWithCompletedToolCalls(message: Message) {
- return (
- message.role === 'assistant' &&
- message.toolInvocations &&
- message.toolInvocations.length > 0 &&
- message.toolInvocations.every(toolInvocation => 'result' in toolInvocation)
- );
-}
diff --git a/packages/solid/src/use-chat.ui.test.tsx b/packages/solid/src/use-chat.ui.test.tsx
index ab6686eaabf7..cb3efb2c33e9 100644
--- a/packages/solid/src/use-chat.ui.test.tsx
+++ b/packages/solid/src/use-chat.ui.test.tsx
@@ -16,14 +16,8 @@ import { useChat } from './use-chat';
describe('file attachments with data url', () => {
const TestComponent = () => {
- const {
- messages,
- handleSubmit,
- handleInputChange,
- isLoading,
- input,
- setInput,
- } = useChat();
+ const { messages, handleSubmit, handleInputChange, isLoading, input } =
+ useChat();
const [attachments, setAttachments] = createSignal();
let fileInputRef: HTMLInputElement | undefined;
@@ -141,6 +135,7 @@ describe('file attachments with data url', () => {
url: 'data:text/plain;base64,dGVzdCBmaWxlIGNvbnRlbnQ=',
},
],
+ parts: [{ text: 'Message with text attachment', type: 'text' }],
},
],
});
@@ -379,6 +374,7 @@ describe('data protocol stream', () => {
createdAt: expect.any(Date),
role: 'assistant',
content: 'Hello, world.',
+ parts: [{ text: 'Hello, world.', type: 'text' }],
},
options: {
finishReason: 'stop',
@@ -517,6 +513,7 @@ describe('text stream', () => {
createdAt: expect.any(Date),
role: 'assistant',
content: 'Hello, world.',
+ parts: [{ text: 'Hello, world.', type: 'text' }],
},
options: {
finishReason: 'unknown',
@@ -1200,7 +1197,13 @@ describe('reload', () => {
expect(await call(1).getRequestBodyJson()).toStrictEqual({
id: expect.any(String),
- messages: [{ content: 'hi', role: 'user' }],
+ messages: [
+ {
+ content: 'hi',
+ role: 'user',
+ parts: [{ text: 'hi', type: 'text' }],
+ },
+ ],
data: { 'test-data-key': 'test-data-value' },
'request-body-key': 'request-body-value',
});
diff --git a/packages/svelte/src/use-assistant.ts b/packages/svelte/src/use-assistant.ts
index 86c74a9a9bfc..d518f99c5f3a 100644
--- a/packages/svelte/src/use-assistant.ts
+++ b/packages/svelte/src/use-assistant.ts
@@ -152,6 +152,7 @@ export function useAssistant({
id: value.id,
role: value.role,
content: value.content[0].text.value,
+ parts: [],
},
]);
},
@@ -186,6 +187,7 @@ export function useAssistant({
role: 'data',
content: '',
data: value.data,
+ parts: [],
},
]);
},
@@ -231,7 +233,10 @@ export function useAssistant({
const inputValue = get(input);
if (!inputValue) return;
- await append({ role: 'user', content: inputValue }, requestOptions);
+ await append(
+ { role: 'user', content: inputValue, parts: [] },
+ requestOptions,
+ );
}
return {
diff --git a/packages/svelte/src/use-chat.ts b/packages/svelte/src/use-chat.ts
index 64a4d2cdef30..f332f57d643e 100644
--- a/packages/svelte/src/use-chat.ts
+++ b/packages/svelte/src/use-chat.ts
@@ -7,12 +7,18 @@ import type {
JSONValue,
Message,
UseChatOptions as SharedUseChatOptions,
+ UIMessage,
} from '@ai-sdk/ui-utils';
import {
callChatApi,
extractMaxToolInvocationStep,
+ fillMessageParts,
generateId as generateIdFunc,
+ getMessageParts,
+ isAssistantMessageWithCompletedToolCalls,
prepareAttachmentsForRequest,
+ shouldResubmitMessages,
+ updateToolCallResult,
} from '@ai-sdk/ui-utils';
import { useSWR } from 'sswr';
import { Readable, Writable, derived, get, writable } from 'svelte/store';
@@ -31,7 +37,7 @@ By default, it's set to 1, which means that only a single LLM call is made.
export type UseChatHelpers = {
/** Current messages in the chat */
- messages: Readable<Message[]>;
+ messages: Readable<UIMessage[]>;
/** The error object of the API request */
error: Readable;
/**
@@ -93,7 +99,7 @@ export type UseChatHelpers = {
const getStreamedResponse = async (
api: string,
chatRequest: ChatRequest,
- mutate: (messages: Message[]) => void,
+ mutate: (messages: UIMessage[]) => void,
mutateStreamData: (data: JSONValue[] | undefined) => void,
existingData: JSONValue[] | undefined,
extraMetadata: {
@@ -101,7 +107,7 @@ const getStreamedResponse = async (
headers?: Record<string, string> | Headers;
body?: any;
},
- previousMessages: Message[],
+ previousMessages: UIMessage[],
abortControllerRef: AbortController | null,
generateId: IdGenerator,
streamProtocol: UseChatOptions['streamProtocol'],
@@ -115,11 +121,13 @@ const getStreamedResponse = async (
) => {
// Do an optimistic update to the chat state to show the updated messages
// immediately.
- mutate(chatRequest.messages);
+ const chatMessages = fillMessageParts(chatRequest.messages);
+
+ mutate(chatMessages);
const constructedMessagesPayload = sendExtraMessageFields
- ? chatRequest.messages
- : chatRequest.messages.map(
+ ? chatMessages
+ : chatMessages.map(
({
role,
content,
@@ -127,6 +135,7 @@ const getStreamedResponse = async (
data,
annotations,
toolInvocations,
+ parts,
}) => ({
role,
content,
@@ -136,6 +145,7 @@ const getStreamedResponse = async (
...(data !== undefined && { data }),
...(annotations !== undefined && { annotations }),
...(toolInvocations !== undefined && { toolInvocations }),
+ ...(parts !== undefined && { parts }),
}),
);
@@ -164,8 +174,8 @@ const getStreamedResponse = async (
onUpdate({ message, data, replaceLastMessage }) {
mutate([
...(replaceLastMessage
- ? chatRequest.messages.slice(0, chatRequest.messages.length - 1)
- : chatRequest.messages),
+ ? chatMessages.slice(0, chatMessages.length - 1)
+ : chatMessages),
message,
]);
if (data?.length) {
@@ -176,25 +186,11 @@ const getStreamedResponse = async (
generateId,
onToolCall,
fetch,
- lastMessage: chatRequest.messages[chatRequest.messages.length - 1],
+ lastMessage: chatMessages[chatMessages.length - 1],
});
};
-const store: Record<string, Message[] | undefined> = {};
-
-/**
-Check if the message is an assistant message with completed tool calls.
-The message must have at least one tool invocation and all tool invocations
-must have a result.
- */
-function isAssistantMessageWithCompletedToolCalls(message: Message) {
- return (
- message.role === 'assistant' &&
- message.toolInvocations &&
- message.toolInvocations.length > 0 &&
- message.toolInvocations.every(toolInvocation => 'result' in toolInvocation)
- );
-}
+const store: Record<string, UIMessage[] | undefined> = {};
export function useChat({
api = '/api/chat',
@@ -231,9 +227,9 @@ export function useChat({
data,
mutate: originalMutate,
isLoading: isSWRLoading,
- } = useSWR<Message[]>(key, {
- fetcher: () => store[key] || initialMessages,
- fallbackData: initialMessages,
+ } = useSWR<UIMessage[]>(key, {
+ fetcher: () => store[key] ?? fillMessageParts(initialMessages),
+ fallbackData: fillMessageParts(initialMessages),
});
const streamData = writable<JSONValue[] | undefined>(undefined);
@@ -241,15 +237,15 @@ export function useChat({
const loading = writable(false);
// Force the `data` to be `initialMessages` if it's `undefined`.
- data.set(initialMessages);
+ data.set(fillMessageParts(initialMessages));
- const mutate = (data: Message[]) => {
+ const mutate = (data: UIMessage[]) => {
store[key] = data;
return originalMutate(data);
};
// Because of the `fallbackData` option, the `data` will never be `undefined`.
- const messages = data as Writable<Message[]>;
+ const messages = data as Writable<UIMessage[]>;
// Abort controller to cancel the current API call.
let abortController: AbortController | null = null;
@@ -316,24 +312,13 @@ export function useChat({
// auto-submit when all tool calls in the last assistant message have results:
const newMessagesSnapshot = get(messages);
-
- const lastMessage = newMessagesSnapshot[newMessagesSnapshot.length - 1];
if (
- // ensure there is a last message:
- lastMessage != null &&
- // ensure we actually have new messages (to prevent infinite loops in case of errors):
- (newMessagesSnapshot.length > messageCount ||
- extractMaxToolInvocationStep(lastMessage.toolInvocations) !==
- maxStep) &&
- // check if the feature is enabled:
- maxSteps > 1 &&
- // check that next step is possible:
- isAssistantMessageWithCompletedToolCalls(lastMessage) &&
- // check that assistant has not answered yet:
- !lastMessage.content && // empty string or undefined
- // limit the number of automatic steps:
- (extractMaxToolInvocationStep(lastMessage.toolInvocations) ?? 0) <
- maxSteps
+ shouldResubmitMessages({
+ originalMaxToolInvocationStep: maxStep,
+ originalMessageCount: messageCount,
+ maxSteps,
+ messages: newMessagesSnapshot,
+ })
) {
await triggerRequest({ messages: newMessagesSnapshot });
}
@@ -343,10 +328,6 @@ export function useChat({
message: Message | CreateMessage,
{ data, headers, body, experimental_attachments }: ChatRequestOptions = {},
) => {
- if (!message.id) {
- message.id = generateId();
- }
-
const attachmentsForRequest = await prepareAttachmentsForRequest(
experimental_attachments,
);
@@ -354,9 +335,12 @@ export function useChat({
return triggerRequest({
messages: get(messages).concat({
...message,
+ id: message.id ?? generateId(),
+ createdAt: message.createdAt ?? new Date(),
experimental_attachments:
attachmentsForRequest.length > 0 ? attachmentsForRequest : undefined,
- } as Message),
+ parts: getMessageParts(message),
+ } as UIMessage),
headers,
body,
data,
@@ -400,7 +384,7 @@ export function useChat({
messagesArg = messagesArg(get(messages));
}
- mutate(messagesArg);
+ mutate(fillMessageParts(messagesArg));
};
const setData = (
@@ -439,7 +423,8 @@ export function useChat({
createdAt: new Date(),
experimental_attachments:
attachmentsForRequest.length > 0 ? attachmentsForRequest : undefined,
- } as Message),
+ parts: [{ type: 'text', text: inputValue }],
+ }),
body: options.body,
headers: options.headers,
data: options.data,
@@ -463,33 +448,20 @@ export function useChat({
result: any;
}) => {
const messagesSnapshot = get(messages) ?? [];
- const updatedMessages = messagesSnapshot.map((message, index, arr) =>
- // update the tool calls in the last assistant message:
- index === arr.length - 1 &&
- message.role === 'assistant' &&
- message.toolInvocations
- ? {
- ...message,
- toolInvocations: message.toolInvocations.map(toolInvocation =>
- toolInvocation.toolCallId === toolCallId
- ? {
- ...toolInvocation,
- result,
- state: 'result' as const,
- }
- : toolInvocation,
- ),
- }
- : message,
- );
- messages.set(updatedMessages);
+ updateToolCallResult({
+ messages: messagesSnapshot,
+ toolCallId,
+ toolResult: result,
+ });
+
+ messages.set(messagesSnapshot);
// auto-submit when all tool calls in the last assistant message have results:
- const lastMessage = updatedMessages[updatedMessages.length - 1];
+ const lastMessage = messagesSnapshot[messagesSnapshot.length - 1];
if (isAssistantMessageWithCompletedToolCalls(lastMessage)) {
- triggerRequest({ messages: updatedMessages });
+ triggerRequest({ messages: messagesSnapshot });
}
};
diff --git a/packages/ui-utils/src/__snapshots__/process-chat-response.test.ts.snap b/packages/ui-utils/src/__snapshots__/process-chat-response.test.ts.snap
index e0d8e57732cf..2beca25cc671 100644
--- a/packages/ui-utils/src/__snapshots__/process-chat-response.test.ts.snap
+++ b/packages/ui-utils/src/__snapshots__/process-chat-response.test.ts.snap
@@ -1,5 +1,414 @@
// Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html
+exports[`scenario: delayed message annotations in onFinish > should call the onFinish function with the correct arguments 1`] = `
+[
+ {
+ "finishReason": "stop",
+ "message": {
+ "annotations": [
+ {
+ "example": "annotation",
+ },
+ ],
+ "content": "text",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "text": "text",
+ "type": "text",
+ },
+ ],
+ "role": "assistant",
+ },
+ "usage": {
+ "completionTokens": 5,
+ "promptTokens": 10,
+ "totalTokens": 15,
+ },
+ },
+]
+`;
+
+exports[`scenario: delayed message annotations in onFinish > should call the update function with the correct arguments 1`] = `
+[
+ {
+ "data": [],
+ "message": {
+ "content": "text",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "text": "text",
+ "type": "text",
+ },
+ ],
+ "revisionId": "id-1",
+ "role": "assistant",
+ },
+ "replaceLastMessage": false,
+ },
+ {
+ "data": [],
+ "message": {
+ "annotations": [
+ {
+ "example": "annotation",
+ },
+ ],
+ "content": "text",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "text": "text",
+ "type": "text",
+ },
+ ],
+ "revisionId": "id-2",
+ "role": "assistant",
+ },
+ "replaceLastMessage": false,
+ },
+]
+`;
+
+exports[`scenario: message annotations in onChunk > should call the onFinish function with the correct arguments 1`] = `
+[
+ {
+ "finishReason": "stop",
+ "message": {
+ "annotations": [
+ "annotation1",
+ "annotation2",
+ ],
+ "content": "t1t2",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "text": "t1t2",
+ "type": "text",
+ },
+ ],
+ "role": "assistant",
+ },
+ "usage": {
+ "completionTokens": 5,
+ "promptTokens": 10,
+ "totalTokens": 15,
+ },
+ },
+]
+`;
+
+exports[`scenario: message annotations in onChunk > should call the update function with the correct arguments 1`] = `
+[
+ {
+ "data": [],
+ "message": {
+ "annotations": [
+ "annotation1",
+ ],
+ "content": "",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [],
+ "revisionId": "id-1",
+ "role": "assistant",
+ },
+ "replaceLastMessage": false,
+ },
+ {
+ "data": [],
+ "message": {
+ "annotations": [
+ "annotation1",
+ ],
+ "content": "t1",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "text": "t1",
+ "type": "text",
+ },
+ ],
+ "revisionId": "id-2",
+ "role": "assistant",
+ },
+ "replaceLastMessage": false,
+ },
+ {
+ "data": [],
+ "message": {
+ "annotations": [
+ "annotation1",
+ "annotation2",
+ ],
+ "content": "t1",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "text": "t1",
+ "type": "text",
+ },
+ ],
+ "revisionId": "id-3",
+ "role": "assistant",
+ },
+ "replaceLastMessage": false,
+ },
+ {
+ "data": [],
+ "message": {
+ "annotations": [
+ "annotation1",
+ "annotation2",
+ ],
+ "content": "t1t2",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "text": "t1t2",
+ "type": "text",
+ },
+ ],
+ "revisionId": "id-4",
+ "role": "assistant",
+ },
+ "replaceLastMessage": false,
+ },
+]
+`;
+
+exports[`scenario: message annotations with existing assistant lastMessage > should call the onFinish function with the correct arguments 1`] = `
+[
+ {
+ "finishReason": "stop",
+ "message": {
+ "annotations": [
+ "annotation0",
+ "annotation1",
+ ],
+ "content": "t1",
+ "createdAt": 2023-01-02T00:00:00.000Z,
+ "id": "original-id",
+ "parts": [
+ {
+ "text": "t1",
+ "type": "text",
+ },
+ ],
+ "role": "assistant",
+ },
+ "usage": {
+ "completionTokens": 5,
+ "promptTokens": 10,
+ "totalTokens": 15,
+ },
+ },
+]
+`;
+
+exports[`scenario: message annotations with existing assistant lastMessage > should call the update function with the correct arguments 1`] = `
+[
+ {
+ "data": [],
+ "message": {
+ "annotations": [
+ "annotation0",
+ "annotation1",
+ ],
+ "content": "",
+ "createdAt": 2023-01-02T00:00:00.000Z,
+ "id": "original-id",
+ "parts": [],
+ "revisionId": "id-0",
+ "role": "assistant",
+ },
+ "replaceLastMessage": true,
+ },
+ {
+ "data": [],
+ "message": {
+ "annotations": [
+ "annotation0",
+ "annotation1",
+ ],
+ "content": "t1",
+ "createdAt": 2023-01-02T00:00:00.000Z,
+ "id": "original-id",
+ "parts": [
+ {
+ "text": "t1",
+ "type": "text",
+ },
+ ],
+ "revisionId": "id-1",
+ "role": "assistant",
+ },
+ "replaceLastMessage": true,
+ },
+]
+`;
+
+exports[`scenario: onToolCall is executed > should call the onFinish function with the correct arguments 1`] = `
+[
+ {
+ "finishReason": "stop",
+ "message": {
+ "content": "",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "toolInvocation": {
+ "args": {
+ "city": "London",
+ },
+ "result": "test-result",
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ "type": "tool-invocation",
+ },
+ ],
+ "role": "assistant",
+ "toolInvocations": [
+ {
+ "args": {
+ "city": "London",
+ },
+ "result": "test-result",
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ ],
+ },
+ "usage": {
+ "completionTokens": 5,
+ "promptTokens": 10,
+ "totalTokens": 15,
+ },
+ },
+]
+`;
+
+exports[`scenario: onToolCall is executed > should call the update function with the correct arguments 1`] = `
+[
+ {
+ "data": [],
+ "message": {
+ "content": "",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "toolInvocation": {
+ "args": {
+ "city": "London",
+ },
+ "result": "test-result",
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ "type": "tool-invocation",
+ },
+ ],
+ "revisionId": "id-1",
+ "role": "assistant",
+ "toolInvocations": [
+ {
+ "args": {
+ "city": "London",
+ },
+ "result": "test-result",
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ ],
+ },
+ "replaceLastMessage": false,
+ },
+]
+`;
+
+exports[`scenario: server provides message ids > should call the onFinish function with the correct arguments 1`] = `
+[
+ {
+ "finishReason": "stop",
+ "message": {
+ "content": "Hello, world!",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "step_123",
+ "parts": [
+ {
+ "text": "Hello, world!",
+ "type": "text",
+ },
+ ],
+ "role": "assistant",
+ },
+ "usage": {
+ "completionTokens": 5,
+ "promptTokens": 10,
+ "totalTokens": 15,
+ },
+ },
+]
+`;
+
+exports[`scenario: server provides message ids > should call the update function with the correct arguments 1`] = `
+[
+ {
+ "data": [],
+ "message": {
+ "content": "Hello, ",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "step_123",
+ "parts": [
+ {
+ "text": "Hello, ",
+ "type": "text",
+ },
+ ],
+ "revisionId": "id-1",
+ "role": "assistant",
+ },
+ "replaceLastMessage": false,
+ },
+ {
+ "data": [],
+ "message": {
+ "content": "Hello, world!",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "step_123",
+ "parts": [
+ {
+ "text": "Hello, world!",
+ "type": "text",
+ },
+ ],
+ "revisionId": "id-2",
+ "role": "assistant",
+ },
+ "replaceLastMessage": false,
+ },
+]
+`;
+
exports[`scenario: server provides reasoning > should call the onFinish function with the correct arguments 1`] = `
[
{
@@ -8,6 +417,16 @@ exports[`scenario: server provides reasoning > should call the onFinish function
"content": "Hello, world!",
"createdAt": 2023-01-01T00:00:00.000Z,
"id": "step_123",
+ "parts": [
+ {
+ "reasoning": "I will open the conversation with witty banter. Once the user has relaxed, I will pry for valuable information.",
+ "type": "reasoning",
+ },
+ {
+ "text": "Hello, world!",
+ "type": "text",
+ },
+ ],
"reasoning": "I will open the conversation with witty banter. Once the user has relaxed, I will pry for valuable information.",
"role": "assistant",
},
@@ -28,6 +447,12 @@ exports[`scenario: server provides reasoning > should call the update function w
"content": "",
"createdAt": 2023-01-01T00:00:00.000Z,
"id": "step_123",
+ "parts": [
+ {
+ "reasoning": "I will open the conversation",
+ "type": "reasoning",
+ },
+ ],
"reasoning": "I will open the conversation",
"revisionId": "id-1",
"role": "assistant",
@@ -40,6 +465,12 @@ exports[`scenario: server provides reasoning > should call the update function w
"content": "",
"createdAt": 2023-01-01T00:00:00.000Z,
"id": "step_123",
+ "parts": [
+ {
+ "reasoning": "I will open the conversation with witty banter. ",
+ "type": "reasoning",
+ },
+ ],
"reasoning": "I will open the conversation with witty banter. ",
"revisionId": "id-2",
"role": "assistant",
@@ -52,6 +483,12 @@ exports[`scenario: server provides reasoning > should call the update function w
"content": "",
"createdAt": 2023-01-01T00:00:00.000Z,
"id": "step_123",
+ "parts": [
+ {
+ "reasoning": "I will open the conversation with witty banter. Once the user has relaxed,",
+ "type": "reasoning",
+ },
+ ],
"reasoning": "I will open the conversation with witty banter. Once the user has relaxed,",
"revisionId": "id-3",
"role": "assistant",
@@ -64,6 +501,12 @@ exports[`scenario: server provides reasoning > should call the update function w
"content": "",
"createdAt": 2023-01-01T00:00:00.000Z,
"id": "step_123",
+ "parts": [
+ {
+ "reasoning": "I will open the conversation with witty banter. Once the user has relaxed, I will pry for valuable information.",
+ "type": "reasoning",
+ },
+ ],
"reasoning": "I will open the conversation with witty banter. Once the user has relaxed, I will pry for valuable information.",
"revisionId": "id-4",
"role": "assistant",
@@ -76,6 +519,16 @@ exports[`scenario: server provides reasoning > should call the update function w
"content": "Hello, ",
"createdAt": 2023-01-01T00:00:00.000Z,
"id": "step_123",
+ "parts": [
+ {
+ "reasoning": "I will open the conversation with witty banter. Once the user has relaxed, I will pry for valuable information.",
+ "type": "reasoning",
+ },
+ {
+ "text": "Hello, ",
+ "type": "text",
+ },
+ ],
"reasoning": "I will open the conversation with witty banter. Once the user has relaxed, I will pry for valuable information.",
"revisionId": "id-5",
"role": "assistant",
@@ -88,6 +541,16 @@ exports[`scenario: server provides reasoning > should call the update function w
"content": "Hello, world!",
"createdAt": 2023-01-01T00:00:00.000Z,
"id": "step_123",
+ "parts": [
+ {
+ "reasoning": "I will open the conversation with witty banter. Once the user has relaxed, I will pry for valuable information.",
+ "type": "reasoning",
+ },
+ {
+ "text": "Hello, world!",
+ "type": "text",
+ },
+ ],
"reasoning": "I will open the conversation with witty banter. Once the user has relaxed, I will pry for valuable information.",
"revisionId": "id-6",
"role": "assistant",
@@ -96,3 +559,1399 @@ exports[`scenario: server provides reasoning > should call the update function w
},
]
`;
+
+exports[`scenario: server-side continue roundtrip > should call the onFinish function with the correct arguments 1`] = `
+[
+ {
+ "finishReason": "stop",
+ "message": {
+ "content": "The weather in London is sunny.",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "text": "The weather in London is sunny.",
+ "type": "text",
+ },
+ ],
+ "role": "assistant",
+ },
+ "usage": {
+ "completionTokens": 7,
+ "promptTokens": 14,
+ "totalTokens": 21,
+ },
+ },
+]
+`;
+
+exports[`scenario: server-side continue roundtrip > should call the update function with the correct arguments 1`] = `
+[
+ {
+ "data": [],
+ "message": {
+ "content": "The weather in London ",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "text": "The weather in London ",
+ "type": "text",
+ },
+ ],
+ "revisionId": "id-1",
+ "role": "assistant",
+ },
+ "replaceLastMessage": false,
+ },
+ {
+ "data": [],
+ "message": {
+ "content": "The weather in London is sunny.",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "text": "The weather in London is sunny.",
+ "type": "text",
+ },
+ ],
+ "revisionId": "id-2",
+ "role": "assistant",
+ },
+ "replaceLastMessage": false,
+ },
+]
+`;
+
+exports[`scenario: server-side tool roundtrip > should call the onFinish function with the correct arguments 1`] = `
+[
+ {
+ "finishReason": "stop",
+ "message": {
+ "content": "The weather in London is sunny.",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "toolInvocation": {
+ "args": {
+ "city": "London",
+ },
+ "result": {
+ "weather": "sunny",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ "type": "tool-invocation",
+ },
+ {
+ "text": "The weather in London is sunny.",
+ "type": "text",
+ },
+ ],
+ "role": "assistant",
+ "toolInvocations": [
+ {
+ "args": {
+ "city": "London",
+ },
+ "result": {
+ "weather": "sunny",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ ],
+ },
+ "usage": {
+ "completionTokens": 7,
+ "promptTokens": 14,
+ "totalTokens": 21,
+ },
+ },
+]
+`;
+
+exports[`scenario: server-side tool roundtrip > should call the update function with the correct arguments 1`] = `
+[
+ {
+ "data": [],
+ "message": {
+ "content": "",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "toolInvocation": {
+ "args": {
+ "city": "London",
+ },
+ "state": "call",
+ "step": 0,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ "type": "tool-invocation",
+ },
+ ],
+ "revisionId": "id-1",
+ "role": "assistant",
+ "toolInvocations": [
+ {
+ "args": {
+ "city": "London",
+ },
+ "state": "call",
+ "step": 0,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ ],
+ },
+ "replaceLastMessage": false,
+ },
+ {
+ "data": [],
+ "message": {
+ "content": "",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "toolInvocation": {
+ "args": {
+ "city": "London",
+ },
+ "result": {
+ "weather": "sunny",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ "type": "tool-invocation",
+ },
+ ],
+ "revisionId": "id-2",
+ "role": "assistant",
+ "toolInvocations": [
+ {
+ "args": {
+ "city": "London",
+ },
+ "result": {
+ "weather": "sunny",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ ],
+ },
+ "replaceLastMessage": false,
+ },
+ {
+ "data": [],
+ "message": {
+ "content": "The weather in London is sunny.",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "toolInvocation": {
+ "args": {
+ "city": "London",
+ },
+ "result": {
+ "weather": "sunny",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ "type": "tool-invocation",
+ },
+ {
+ "text": "The weather in London is sunny.",
+ "type": "text",
+ },
+ ],
+ "revisionId": "id-3",
+ "role": "assistant",
+ "toolInvocations": [
+ {
+ "args": {
+ "city": "London",
+ },
+ "result": {
+ "weather": "sunny",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ ],
+ },
+ "replaceLastMessage": false,
+ },
+]
+`;
+
+exports[`scenario: server-side tool roundtrip with existing assistant message > should call the onFinish function with the correct arguments 1`] = `
+[
+ {
+ "finishReason": "stop",
+ "message": {
+ "content": "The weather in London is sunny.",
+ "createdAt": 2023-01-02T00:00:00.000Z,
+ "id": "original-id",
+ "parts": [
+ {
+ "toolInvocation": {
+ "args": {},
+ "result": {
+ "location": "Berlin",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-id-original",
+ "toolName": "tool-name-original",
+ },
+ "type": "tool-invocation",
+ },
+ {
+ "toolInvocation": {
+ "args": {
+ "city": "London",
+ },
+ "result": {
+ "weather": "sunny",
+ },
+ "state": "result",
+ "step": 1,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ "type": "tool-invocation",
+ },
+ {
+ "text": "The weather in London is sunny.",
+ "type": "text",
+ },
+ ],
+ "role": "assistant",
+ "toolInvocations": [
+ {
+ "args": {},
+ "result": {
+ "location": "Berlin",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-id-original",
+ "toolName": "tool-name-original",
+ },
+ {
+ "args": {
+ "city": "London",
+ },
+ "result": {
+ "weather": "sunny",
+ },
+ "state": "result",
+ "step": 1,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ ],
+ },
+ "usage": {
+ "completionTokens": 7,
+ "promptTokens": 14,
+ "totalTokens": 21,
+ },
+ },
+]
+`;
+
+exports[`scenario: server-side tool roundtrip with existing assistant message > should call the update function with the correct arguments 1`] = `
+[
+ {
+ "data": [],
+ "message": {
+ "content": "",
+ "createdAt": 2023-01-02T00:00:00.000Z,
+ "id": "original-id",
+ "parts": [
+ {
+ "toolInvocation": {
+ "args": {},
+ "result": {
+ "location": "Berlin",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-id-original",
+ "toolName": "tool-name-original",
+ },
+ "type": "tool-invocation",
+ },
+ {
+ "toolInvocation": {
+ "args": {
+ "city": "London",
+ },
+ "state": "call",
+ "step": 1,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ "type": "tool-invocation",
+ },
+ ],
+ "revisionId": "id-0",
+ "role": "assistant",
+ "toolInvocations": [
+ {
+ "args": {},
+ "result": {
+ "location": "Berlin",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-id-original",
+ "toolName": "tool-name-original",
+ },
+ {
+ "args": {
+ "city": "London",
+ },
+ "state": "call",
+ "step": 1,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ ],
+ },
+ "replaceLastMessage": true,
+ },
+ {
+ "data": [],
+ "message": {
+ "content": "",
+ "createdAt": 2023-01-02T00:00:00.000Z,
+ "id": "original-id",
+ "parts": [
+ {
+ "toolInvocation": {
+ "args": {},
+ "result": {
+ "location": "Berlin",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-id-original",
+ "toolName": "tool-name-original",
+ },
+ "type": "tool-invocation",
+ },
+ {
+ "toolInvocation": {
+ "args": {
+ "city": "London",
+ },
+ "result": {
+ "weather": "sunny",
+ },
+ "state": "result",
+ "step": 1,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ "type": "tool-invocation",
+ },
+ ],
+ "revisionId": "id-1",
+ "role": "assistant",
+ "toolInvocations": [
+ {
+ "args": {},
+ "result": {
+ "location": "Berlin",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-id-original",
+ "toolName": "tool-name-original",
+ },
+ {
+ "args": {
+ "city": "London",
+ },
+ "result": {
+ "weather": "sunny",
+ },
+ "state": "result",
+ "step": 1,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ ],
+ },
+ "replaceLastMessage": true,
+ },
+ {
+ "data": [],
+ "message": {
+ "content": "The weather in London is sunny.",
+ "createdAt": 2023-01-02T00:00:00.000Z,
+ "id": "original-id",
+ "parts": [
+ {
+ "toolInvocation": {
+ "args": {},
+ "result": {
+ "location": "Berlin",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-id-original",
+ "toolName": "tool-name-original",
+ },
+ "type": "tool-invocation",
+ },
+ {
+ "toolInvocation": {
+ "args": {
+ "city": "London",
+ },
+ "result": {
+ "weather": "sunny",
+ },
+ "state": "result",
+ "step": 1,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ "type": "tool-invocation",
+ },
+ {
+ "text": "The weather in London is sunny.",
+ "type": "text",
+ },
+ ],
+ "revisionId": "id-2",
+ "role": "assistant",
+ "toolInvocations": [
+ {
+ "args": {},
+ "result": {
+ "location": "Berlin",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-id-original",
+ "toolName": "tool-name-original",
+ },
+ {
+ "args": {
+ "city": "London",
+ },
+ "result": {
+ "weather": "sunny",
+ },
+ "state": "result",
+ "step": 1,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ ],
+ },
+ "replaceLastMessage": true,
+ },
+]
+`;
+
+exports[`scenario: server-side tool roundtrip with multiple assistant reasoning > should call the onFinish function with the correct arguments 1`] = `
+[
+ {
+ "finishReason": "stop",
+ "message": {
+ "content": "The weather in London is sunny.",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "reasoning": "I will use a tool to get the weather in London.",
+ "type": "reasoning",
+ },
+ {
+ "toolInvocation": {
+ "args": {
+ "city": "London",
+ },
+ "result": {
+ "weather": "sunny",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ "type": "tool-invocation",
+ },
+ {
+ "reasoning": "I know know the weather in London.",
+ "type": "reasoning",
+ },
+ {
+ "text": "The weather in London is sunny.",
+ "type": "text",
+ },
+ ],
+ "reasoning": "I will use a tool to get the weather in London.I know know the weather in London.",
+ "role": "assistant",
+ "toolInvocations": [
+ {
+ "args": {
+ "city": "London",
+ },
+ "result": {
+ "weather": "sunny",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ ],
+ },
+ "usage": {
+ "completionTokens": 7,
+ "promptTokens": 14,
+ "totalTokens": 21,
+ },
+ },
+]
+`;
+
+exports[`scenario: server-side tool roundtrip with multiple assistant reasoning > should call the update function with the correct arguments 1`] = `
+[
+ {
+ "data": [],
+ "message": {
+ "content": "",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "reasoning": "I will ",
+ "type": "reasoning",
+ },
+ ],
+ "reasoning": "I will ",
+ "revisionId": "id-1",
+ "role": "assistant",
+ },
+ "replaceLastMessage": false,
+ },
+ {
+ "data": [],
+ "message": {
+ "content": "",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "reasoning": "I will use a tool to get the weather in London.",
+ "type": "reasoning",
+ },
+ ],
+ "reasoning": "I will use a tool to get the weather in London.",
+ "revisionId": "id-2",
+ "role": "assistant",
+ },
+ "replaceLastMessage": false,
+ },
+ {
+ "data": [],
+ "message": {
+ "content": "",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "reasoning": "I will use a tool to get the weather in London.",
+ "type": "reasoning",
+ },
+ {
+ "toolInvocation": {
+ "args": {
+ "city": "London",
+ },
+ "state": "call",
+ "step": 0,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ "type": "tool-invocation",
+ },
+ ],
+ "reasoning": "I will use a tool to get the weather in London.",
+ "revisionId": "id-3",
+ "role": "assistant",
+ "toolInvocations": [
+ {
+ "args": {
+ "city": "London",
+ },
+ "state": "call",
+ "step": 0,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ ],
+ },
+ "replaceLastMessage": false,
+ },
+ {
+ "data": [],
+ "message": {
+ "content": "",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "reasoning": "I will use a tool to get the weather in London.",
+ "type": "reasoning",
+ },
+ {
+ "toolInvocation": {
+ "args": {
+ "city": "London",
+ },
+ "result": {
+ "weather": "sunny",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ "type": "tool-invocation",
+ },
+ ],
+ "reasoning": "I will use a tool to get the weather in London.",
+ "revisionId": "id-4",
+ "role": "assistant",
+ "toolInvocations": [
+ {
+ "args": {
+ "city": "London",
+ },
+ "result": {
+ "weather": "sunny",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ ],
+ },
+ "replaceLastMessage": false,
+ },
+ {
+ "data": [],
+ "message": {
+ "content": "",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "reasoning": "I will use a tool to get the weather in London.",
+ "type": "reasoning",
+ },
+ {
+ "toolInvocation": {
+ "args": {
+ "city": "London",
+ },
+ "result": {
+ "weather": "sunny",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ "type": "tool-invocation",
+ },
+ {
+ "reasoning": "I know know the weather in London.",
+ "type": "reasoning",
+ },
+ ],
+ "reasoning": "I will use a tool to get the weather in London.I know know the weather in London.",
+ "revisionId": "id-5",
+ "role": "assistant",
+ "toolInvocations": [
+ {
+ "args": {
+ "city": "London",
+ },
+ "result": {
+ "weather": "sunny",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ ],
+ },
+ "replaceLastMessage": false,
+ },
+ {
+ "data": [],
+ "message": {
+ "content": "The weather in London is sunny.",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "reasoning": "I will use a tool to get the weather in London.",
+ "type": "reasoning",
+ },
+ {
+ "toolInvocation": {
+ "args": {
+ "city": "London",
+ },
+ "result": {
+ "weather": "sunny",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ "type": "tool-invocation",
+ },
+ {
+ "reasoning": "I know know the weather in London.",
+ "type": "reasoning",
+ },
+ {
+ "text": "The weather in London is sunny.",
+ "type": "text",
+ },
+ ],
+ "reasoning": "I will use a tool to get the weather in London.I know know the weather in London.",
+ "revisionId": "id-6",
+ "role": "assistant",
+ "toolInvocations": [
+ {
+ "args": {
+ "city": "London",
+ },
+ "result": {
+ "weather": "sunny",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ ],
+ },
+ "replaceLastMessage": false,
+ },
+]
+`;
+
+exports[`scenario: server-side tool roundtrip with multiple assistant texts > should call the onFinish function with the correct arguments 1`] = `
+[
+ {
+ "finishReason": "stop",
+ "message": {
+ "content": "I will use a tool to get the weather in London.The weather in London is sunny.",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "text": "I will use a tool to get the weather in London.",
+ "type": "text",
+ },
+ {
+ "toolInvocation": {
+ "args": {
+ "city": "London",
+ },
+ "result": {
+ "weather": "sunny",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ "type": "tool-invocation",
+ },
+ {
+ "text": "The weather in London is sunny.",
+ "type": "text",
+ },
+ ],
+ "role": "assistant",
+ "toolInvocations": [
+ {
+ "args": {
+ "city": "London",
+ },
+ "result": {
+ "weather": "sunny",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ ],
+ },
+ "usage": {
+ "completionTokens": 7,
+ "promptTokens": 14,
+ "totalTokens": 21,
+ },
+ },
+]
+`;
+
+exports[`scenario: server-side tool roundtrip with multiple assistant texts > should call the update function with the correct arguments 1`] = `
+[
+ {
+ "data": [],
+ "message": {
+ "content": "I will ",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "text": "I will ",
+ "type": "text",
+ },
+ ],
+ "revisionId": "id-1",
+ "role": "assistant",
+ },
+ "replaceLastMessage": false,
+ },
+ {
+ "data": [],
+ "message": {
+ "content": "I will use a tool to get the weather in London.",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "text": "I will use a tool to get the weather in London.",
+ "type": "text",
+ },
+ ],
+ "revisionId": "id-2",
+ "role": "assistant",
+ },
+ "replaceLastMessage": false,
+ },
+ {
+ "data": [],
+ "message": {
+ "content": "I will use a tool to get the weather in London.",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "text": "I will use a tool to get the weather in London.",
+ "type": "text",
+ },
+ {
+ "toolInvocation": {
+ "args": {
+ "city": "London",
+ },
+ "state": "call",
+ "step": 0,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ "type": "tool-invocation",
+ },
+ ],
+ "revisionId": "id-3",
+ "role": "assistant",
+ "toolInvocations": [
+ {
+ "args": {
+ "city": "London",
+ },
+ "state": "call",
+ "step": 0,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ ],
+ },
+ "replaceLastMessage": false,
+ },
+ {
+ "data": [],
+ "message": {
+ "content": "I will use a tool to get the weather in London.",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "text": "I will use a tool to get the weather in London.",
+ "type": "text",
+ },
+ {
+ "toolInvocation": {
+ "args": {
+ "city": "London",
+ },
+ "result": {
+ "weather": "sunny",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ "type": "tool-invocation",
+ },
+ ],
+ "revisionId": "id-4",
+ "role": "assistant",
+ "toolInvocations": [
+ {
+ "args": {
+ "city": "London",
+ },
+ "result": {
+ "weather": "sunny",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ ],
+ },
+ "replaceLastMessage": false,
+ },
+ {
+ "data": [],
+ "message": {
+ "content": "I will use a tool to get the weather in London.The weather in London ",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "text": "I will use a tool to get the weather in London.",
+ "type": "text",
+ },
+ {
+ "toolInvocation": {
+ "args": {
+ "city": "London",
+ },
+ "result": {
+ "weather": "sunny",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ "type": "tool-invocation",
+ },
+ {
+ "text": "The weather in London ",
+ "type": "text",
+ },
+ ],
+ "revisionId": "id-5",
+ "role": "assistant",
+ "toolInvocations": [
+ {
+ "args": {
+ "city": "London",
+ },
+ "result": {
+ "weather": "sunny",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ ],
+ },
+ "replaceLastMessage": false,
+ },
+ {
+ "data": [],
+ "message": {
+ "content": "I will use a tool to get the weather in London.The weather in London is sunny.",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "text": "I will use a tool to get the weather in London.",
+ "type": "text",
+ },
+ {
+ "toolInvocation": {
+ "args": {
+ "city": "London",
+ },
+ "result": {
+ "weather": "sunny",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ "type": "tool-invocation",
+ },
+ {
+ "text": "The weather in London is sunny.",
+ "type": "text",
+ },
+ ],
+ "revisionId": "id-6",
+ "role": "assistant",
+ "toolInvocations": [
+ {
+ "args": {
+ "city": "London",
+ },
+ "result": {
+ "weather": "sunny",
+ },
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-id",
+ "toolName": "tool-name",
+ },
+ ],
+ },
+ "replaceLastMessage": false,
+ },
+]
+`;
+
+exports[`scenario: simple text response > should call the onFinish function with the correct arguments 1`] = `
+[
+ {
+ "finishReason": "stop",
+ "message": {
+ "content": "Hello, world!",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "text": "Hello, world!",
+ "type": "text",
+ },
+ ],
+ "role": "assistant",
+ },
+ "usage": {
+ "completionTokens": 5,
+ "promptTokens": 10,
+ "totalTokens": 15,
+ },
+ },
+]
+`;
+
+exports[`scenario: simple text response > should call the update function with the correct arguments 1`] = `
+[
+ {
+ "data": [],
+ "message": {
+ "content": "Hello, ",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "text": "Hello, ",
+ "type": "text",
+ },
+ ],
+ "revisionId": "id-1",
+ "role": "assistant",
+ },
+ "replaceLastMessage": false,
+ },
+ {
+ "data": [],
+ "message": {
+ "content": "Hello, world!",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "text": "Hello, world!",
+ "type": "text",
+ },
+ ],
+ "revisionId": "id-2",
+ "role": "assistant",
+ },
+ "replaceLastMessage": false,
+ },
+]
+`;
+
+exports[`scenario: tool call streaming > should call the onFinish function with the correct arguments 1`] = `
+[
+ {
+ "finishReason": "stop",
+ "message": {
+ "content": "",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "toolInvocation": {
+ "args": {
+ "testArg": "test-value",
+ },
+ "result": "test-result",
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-0",
+ "toolName": "test-tool",
+ },
+ "type": "tool-invocation",
+ },
+ ],
+ "role": "assistant",
+ "toolInvocations": [
+ {
+ "args": {
+ "testArg": "test-value",
+ },
+ "result": "test-result",
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-0",
+ "toolName": "test-tool",
+ },
+ ],
+ },
+ "usage": {
+ "completionTokens": 5,
+ "promptTokens": 10,
+ "totalTokens": 15,
+ },
+ },
+]
+`;
+
+exports[`scenario: tool call streaming > should call the update function with the correct arguments 1`] = `
+[
+ {
+ "data": [],
+ "message": {
+ "content": "",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "toolInvocation": {
+ "args": undefined,
+ "state": "partial-call",
+ "step": 0,
+ "toolCallId": "tool-call-0",
+ "toolName": "test-tool",
+ },
+ "type": "tool-invocation",
+ },
+ ],
+ "revisionId": "id-1",
+ "role": "assistant",
+ "toolInvocations": [
+ {
+ "args": undefined,
+ "state": "partial-call",
+ "step": 0,
+ "toolCallId": "tool-call-0",
+ "toolName": "test-tool",
+ },
+ ],
+ },
+ "replaceLastMessage": false,
+ },
+ {
+ "data": [],
+ "message": {
+ "content": "",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "toolInvocation": {
+ "args": {
+ "testArg": "t",
+ },
+ "state": "partial-call",
+ "step": 0,
+ "toolCallId": "tool-call-0",
+ "toolName": "test-tool",
+ },
+ "type": "tool-invocation",
+ },
+ ],
+ "revisionId": "id-2",
+ "role": "assistant",
+ "toolInvocations": [
+ {
+ "args": {
+ "testArg": "t",
+ },
+ "state": "partial-call",
+ "step": 0,
+ "toolCallId": "tool-call-0",
+ "toolName": "test-tool",
+ },
+ ],
+ },
+ "replaceLastMessage": false,
+ },
+ {
+ "data": [],
+ "message": {
+ "content": "",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "toolInvocation": {
+ "args": {
+ "testArg": "test-value",
+ },
+ "state": "partial-call",
+ "step": 0,
+ "toolCallId": "tool-call-0",
+ "toolName": "test-tool",
+ },
+ "type": "tool-invocation",
+ },
+ ],
+ "revisionId": "id-3",
+ "role": "assistant",
+ "toolInvocations": [
+ {
+ "args": {
+ "testArg": "test-value",
+ },
+ "state": "partial-call",
+ "step": 0,
+ "toolCallId": "tool-call-0",
+ "toolName": "test-tool",
+ },
+ ],
+ },
+ "replaceLastMessage": false,
+ },
+ {
+ "data": [],
+ "message": {
+ "content": "",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "toolInvocation": {
+ "args": {
+ "testArg": "test-value",
+ },
+ "state": "call",
+ "step": 0,
+ "toolCallId": "tool-call-0",
+ "toolName": "test-tool",
+ },
+ "type": "tool-invocation",
+ },
+ ],
+ "revisionId": "id-4",
+ "role": "assistant",
+ "toolInvocations": [
+ {
+ "args": {
+ "testArg": "test-value",
+ },
+ "state": "call",
+ "step": 0,
+ "toolCallId": "tool-call-0",
+ "toolName": "test-tool",
+ },
+ ],
+ },
+ "replaceLastMessage": false,
+ },
+ {
+ "data": [],
+ "message": {
+ "content": "",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "id-0",
+ "parts": [
+ {
+ "toolInvocation": {
+ "args": {
+ "testArg": "test-value",
+ },
+ "result": "test-result",
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-0",
+ "toolName": "test-tool",
+ },
+ "type": "tool-invocation",
+ },
+ ],
+ "revisionId": "id-5",
+ "role": "assistant",
+ "toolInvocations": [
+ {
+ "args": {
+ "testArg": "test-value",
+ },
+ "result": "test-result",
+ "state": "result",
+ "step": 0,
+ "toolCallId": "tool-call-0",
+ "toolName": "test-tool",
+ },
+ ],
+ },
+ "replaceLastMessage": false,
+ },
+]
+`;
diff --git a/packages/ui-utils/src/__snapshots__/process-chat-text-response.test.ts.snap b/packages/ui-utils/src/__snapshots__/process-chat-text-response.test.ts.snap
new file mode 100644
index 000000000000..74f33e7f5b1f
--- /dev/null
+++ b/packages/ui-utils/src/__snapshots__/process-chat-text-response.test.ts.snap
@@ -0,0 +1,176 @@
+// Vitest Snapshot v1, https://vitest.dev/guide/snapshot.html
+
+exports[`processChatTextResponse > scenario: multiple short chunks > should call the onFinish function after the stream ends 1`] = `
+[
+ {
+ "content": "ABCDE",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "test-id",
+ "parts": [
+ {
+ "text": "ABCDE",
+ "type": "text",
+ },
+ ],
+ "role": "assistant",
+ },
+]
+`;
+
+exports[`processChatTextResponse > scenario: multiple short chunks > should call the update function with correct arguments for each chunk 1`] = `
+[
+ {
+ "data": [],
+ "message": {
+ "content": "A",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "test-id",
+ "parts": [
+ {
+ "text": "A",
+ "type": "text",
+ },
+ ],
+ "role": "assistant",
+ },
+ "replaceLastMessage": false,
+ },
+ {
+ "data": [],
+ "message": {
+ "content": "AB",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "test-id",
+ "parts": [
+ {
+ "text": "AB",
+ "type": "text",
+ },
+ ],
+ "role": "assistant",
+ },
+ "replaceLastMessage": false,
+ },
+ {
+ "data": [],
+ "message": {
+ "content": "ABC",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "test-id",
+ "parts": [
+ {
+ "text": "ABC",
+ "type": "text",
+ },
+ ],
+ "role": "assistant",
+ },
+ "replaceLastMessage": false,
+ },
+ {
+ "data": [],
+ "message": {
+ "content": "ABCD",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "test-id",
+ "parts": [
+ {
+ "text": "ABCD",
+ "type": "text",
+ },
+ ],
+ "role": "assistant",
+ },
+ "replaceLastMessage": false,
+ },
+ {
+ "data": [],
+ "message": {
+ "content": "ABCDE",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "test-id",
+ "parts": [
+ {
+ "text": "ABCDE",
+ "type": "text",
+ },
+ ],
+ "role": "assistant",
+ },
+ "replaceLastMessage": false,
+ },
+]
+`;
+
+exports[`processChatTextResponse > scenario: no text chunks > should call the onFinish function after the stream ends 1`] = `
+[
+ {
+ "content": "",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "test-id",
+ "parts": [
+ {
+ "text": "",
+ "type": "text",
+ },
+ ],
+ "role": "assistant",
+ },
+]
+`;
+
+exports[`processChatTextResponse > scenario: no text chunks > should call the update function with correct arguments for each chunk 1`] = `[]`;
+
+exports[`processChatTextResponse > scenario: simple text response > should call the onFinish function after the stream ends 1`] = `
+[
+ {
+ "content": "Hello, world!",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "test-id",
+ "parts": [
+ {
+ "text": "Hello, world!",
+ "type": "text",
+ },
+ ],
+ "role": "assistant",
+ },
+]
+`;
+
+exports[`processChatTextResponse > scenario: simple text response > should call the update function with correct arguments for each chunk 1`] = `
+[
+ {
+ "data": [],
+ "message": {
+ "content": "Hello, ",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "test-id",
+ "parts": [
+ {
+ "text": "Hello, ",
+ "type": "text",
+ },
+ ],
+ "role": "assistant",
+ },
+ "replaceLastMessage": false,
+ },
+ {
+ "data": [],
+ "message": {
+ "content": "Hello, world!",
+ "createdAt": 2023-01-01T00:00:00.000Z,
+ "id": "test-id",
+ "parts": [
+ {
+ "text": "Hello, world!",
+ "type": "text",
+ },
+ ],
+ "role": "assistant",
+ },
+ "replaceLastMessage": false,
+ },
+]
+`;
diff --git a/packages/ui-utils/src/call-chat-api.ts b/packages/ui-utils/src/call-chat-api.ts
index 56610e892238..80fc1636f8eb 100644
--- a/packages/ui-utils/src/call-chat-api.ts
+++ b/packages/ui-utils/src/call-chat-api.ts
@@ -1,6 +1,6 @@
import { processChatResponse } from './process-chat-response';
-import { processTextStream } from './process-text-stream';
-import { IdGenerator, JSONValue, Message, UseChatOptions } from './types';
+import { processChatTextResponse } from './process-chat-text-response';
+import { IdGenerator, JSONValue, UIMessage, UseChatOptions } from './types';
// use function to allow for mocking in tests:
const getOriginalFetch = () => fetch;
@@ -30,7 +30,7 @@ export async function callChatApi({
restoreMessagesOnFailure: () => void;
onResponse: ((response: Response) => void | Promise) | undefined;
onUpdate: (options: {
- message: Message;
+ message: UIMessage;
data: JSONValue[] | undefined;
replaceLastMessage: boolean;
}) => void;
@@ -38,7 +38,7 @@ export async function callChatApi({
onToolCall: UseChatOptions['onToolCall'];
generateId: IdGenerator;
fetch: ReturnType | undefined;
- lastMessage: Message | undefined;
+ lastMessage: UIMessage | undefined;
}) {
const response = await fetch(api, {
method: 'POST',
@@ -75,31 +75,11 @@ export async function callChatApi({
switch (streamProtocol) {
case 'text': {
- const resultMessage: Message = {
- id: generateId(),
- createdAt: new Date(),
- role: 'assistant' as const,
- content: '',
- };
-
- await processTextStream({
+ await processChatTextResponse({
stream: response.body,
- onTextPart: chunk => {
- resultMessage.content += chunk;
-
- // note: creating a new message object is required for Solid.js streaming
- onUpdate({
- message: { ...resultMessage },
- data: [],
- replaceLastMessage: false,
- });
- },
- });
-
- // in text mode, we don't have usage information or finish reason:
- onFinish?.(resultMessage, {
- usage: { completionTokens: NaN, promptTokens: NaN, totalTokens: NaN },
- finishReason: 'unknown',
+ update: onUpdate,
+ onFinish,
+ generateId,
});
return;
}
diff --git a/packages/ui-utils/src/fill-message-parts.ts b/packages/ui-utils/src/fill-message-parts.ts
new file mode 100644
index 000000000000..84e9d3b36273
--- /dev/null
+++ b/packages/ui-utils/src/fill-message-parts.ts
@@ -0,0 +1,9 @@
+import { getMessageParts } from './get-message-parts';
+import { Message, UIMessage } from './types';
+
+export function fillMessageParts(messages: Message[]): UIMessage[] {
+ return messages.map(message => ({
+ ...message,
+ parts: getMessageParts(message),
+ }));
+}
diff --git a/packages/ui-utils/src/get-message-parts.ts b/packages/ui-utils/src/get-message-parts.ts
new file mode 100644
index 000000000000..46421cbc37b4
--- /dev/null
+++ b/packages/ui-utils/src/get-message-parts.ts
@@ -0,0 +1,29 @@
+import {
+ CreateMessage,
+ Message,
+ ReasoningUIPart,
+ TextUIPart,
+ ToolInvocationUIPart,
+ UIMessage,
+} from './types';
+
+export function getMessageParts(
+ message: Message | CreateMessage | UIMessage,
+): (TextUIPart | ReasoningUIPart | ToolInvocationUIPart)[] {
+ return (
+ message.parts ?? [
+ ...(message.toolInvocations
+ ? message.toolInvocations.map(toolInvocation => ({
+ type: 'tool-invocation' as const,
+ toolInvocation,
+ }))
+ : []),
+ ...(message.reasoning
+ ? [{ type: 'reasoning' as const, reasoning: message.reasoning }]
+ : []),
+ ...(message.content
+ ? [{ type: 'text' as const, text: message.content }]
+ : []),
+ ]
+ );
+}
diff --git a/packages/ui-utils/src/index.ts b/packages/ui-utils/src/index.ts
index 8ca86a2bc4c4..12ba2071ccb6 100644
--- a/packages/ui-utils/src/index.ts
+++ b/packages/ui-utils/src/index.ts
@@ -20,6 +20,8 @@ export type { DataStreamPart, DataStreamString } from './data-stream-parts';
export { getTextFromDataUrl } from './data-url';
export type { DeepPartial } from './deep-partial';
export { extractMaxToolInvocationStep } from './extract-max-tool-invocation-step';
+export { fillMessageParts } from './fill-message-parts';
+export { getMessageParts } from './get-message-parts';
export { isDeepEqualData } from './is-deep-equal-data';
export { parsePartialJson } from './parse-partial-json';
export { prepareAttachmentsForRequest } from './prepare-attachments-for-request';
@@ -28,4 +30,9 @@ export { processDataStream } from './process-data-stream';
export { processTextStream } from './process-text-stream';
export { asSchema, jsonSchema } from './schema';
export type { Schema } from './schema';
+export {
+ isAssistantMessageWithCompletedToolCalls,
+ shouldResubmitMessages,
+} from './should-resubmit-messages';
+export { updateToolCallResult } from './update-tool-call-result';
export { zodSchema } from './zod-schema';
diff --git a/packages/ui-utils/src/process-chat-response.test.ts b/packages/ui-utils/src/process-chat-response.test.ts
index 1755506ef5f3..531394567916 100644
--- a/packages/ui-utils/src/process-chat-response.test.ts
+++ b/packages/ui-utils/src/process-chat-response.test.ts
@@ -71,49 +71,11 @@ describe('scenario: simple text response', () => {
});
it('should call the update function with the correct arguments', async () => {
- expect(updateCalls).toStrictEqual([
- {
- message: {
- content: 'Hello, ',
- createdAt: new Date('2023-01-01T00:00:00.000Z'),
- id: 'id-0',
- revisionId: 'id-1',
- role: 'assistant',
- },
- data: [],
- replaceLastMessage: false,
- },
- {
- message: {
- content: 'Hello, world!',
- createdAt: new Date('2023-01-01T00:00:00.000Z'),
- id: 'id-0',
- revisionId: 'id-2',
- role: 'assistant',
- },
- data: [],
- replaceLastMessage: false,
- },
- ]);
+ expect(updateCalls).toMatchSnapshot();
});
it('should call the onFinish function with the correct arguments', async () => {
- expect(finishCalls).toStrictEqual([
- {
- message: {
- content: 'Hello, world!',
- createdAt: new Date('2023-01-01T00:00:00.000Z'),
- id: 'id-0',
- role: 'assistant',
- },
- finishReason: 'stop',
- usage: {
- completionTokens: 5,
- promptTokens: 10,
- totalTokens: 15,
- },
- },
- ]);
+ expect(finishCalls).toMatchSnapshot();
});
});
@@ -157,109 +119,11 @@ describe('scenario: server-side tool roundtrip', () => {
});
it('should call the update function with the correct arguments', async () => {
- expect(updateCalls).toStrictEqual([
- {
- message: {
- id: 'id-0',
- revisionId: 'id-1',
- role: 'assistant',
- content: '',
- createdAt: new Date('2023-01-01T00:00:00.000Z'),
- toolInvocations: [
- {
- args: {
- city: 'London',
- },
- state: 'call',
- toolCallId: 'tool-call-id',
- toolName: 'tool-name',
- step: 0,
- },
- ],
- },
- data: [],
- replaceLastMessage: false,
- },
- {
- message: {
- id: 'id-0',
- revisionId: 'id-2',
- role: 'assistant',
- content: '',
- createdAt: new Date('2023-01-01T00:00:00.000Z'),
- toolInvocations: [
- {
- args: {
- city: 'London',
- },
- result: {
- weather: 'sunny',
- },
- state: 'result',
- toolCallId: 'tool-call-id',
- toolName: 'tool-name',
- step: 0,
- },
- ],
- },
- data: [],
- replaceLastMessage: false,
- },
- {
- message: {
- id: 'id-0',
- revisionId: 'id-3',
- role: 'assistant',
- content: 'The weather in London is sunny.',
- createdAt: new Date('2023-01-01T00:00:00.000Z'),
- toolInvocations: [
- {
- args: {
- city: 'London',
- },
- result: {
- weather: 'sunny',
- },
- state: 'result',
- toolCallId: 'tool-call-id',
- toolName: 'tool-name',
- step: 0,
- },
- ],
- },
- data: [],
- replaceLastMessage: false,
- },
- ]);
+ expect(updateCalls).toMatchSnapshot();
});
it('should call the onFinish function with the correct arguments', async () => {
- expect(finishCalls).toStrictEqual([
- {
- message: {
- id: 'id-0',
- role: 'assistant',
- content: 'The weather in London is sunny.',
- createdAt: new Date('2023-01-01T00:00:00.000Z'),
- toolInvocations: [
- {
- args: { city: 'London' },
- result: { weather: 'sunny' },
- state: 'result',
- step: 0,
- toolCallId: 'tool-call-id',
- toolName: 'tool-name',
- },
- ],
- },
- finishReason: 'stop',
- usage: {
- completionTokens: 7,
- promptTokens: 14,
- totalTokens: 21,
- },
- },
- ]);
+ expect(finishCalls).toMatchSnapshot();
});
});
@@ -314,84 +178,10 @@ describe('scenario: server-side tool roundtrip with existing assistant message',
toolName: 'tool-name-original',
},
],
- },
- });
- });
-
- it('should call the update function with the correct arguments', async () => {
- expect(updateCalls).toStrictEqual([
- {
- message: {
- id: 'original-id',
- revisionId: 'id-0',
- role: 'assistant',
- content: '',
- createdAt: new Date('2023-01-02T00:00:00.000Z'),
- toolInvocations: [
- {
- args: {},
- result: { location: 'Berlin' },
- state: 'result',
- step: 0,
- toolCallId: 'tool-call-id-original',
- toolName: 'tool-name-original',
- },
- {
- args: {
- city: 'London',
- },
- state: 'call',
- toolCallId: 'tool-call-id',
- toolName: 'tool-name',
- step: 1,
- },
- ],
- },
- data: [],
- replaceLastMessage: true,
- },
- {
- message: {
- id: 'original-id',
- revisionId: 'id-1',
- role: 'assistant',
- content: '',
- createdAt: new Date('2023-01-02T00:00:00.000Z'),
- toolInvocations: [
- {
- args: {},
- result: { location: 'Berlin' },
- state: 'result',
- step: 0,
- toolCallId: 'tool-call-id-original',
- toolName: 'tool-name-original',
- },
- {
- args: {
- city: 'London',
- },
- result: {
- weather: 'sunny',
- },
- state: 'result',
- toolCallId: 'tool-call-id',
- toolName: 'tool-name',
- step: 1,
- },
- ],
- },
- data: [],
- replaceLastMessage: true,
- },
- {
- message: {
- id: 'original-id',
- revisionId: 'id-2',
- role: 'assistant',
- content: 'The weather in London is sunny.',
- createdAt: new Date('2023-01-02T00:00:00.000Z'),
- toolInvocations: [
- {
+ parts: [
+ {
+ type: 'tool-invocation',
+ toolInvocation: {
args: {},
result: { location: 'Berlin' },
state: 'result',
@@ -399,61 +189,123 @@ describe('scenario: server-side tool roundtrip with existing assistant message',
toolCallId: 'tool-call-id-original',
toolName: 'tool-name-original',
},
- {
- args: {
- city: 'London',
- },
- result: {
- weather: 'sunny',
- },
- state: 'result',
- toolCallId: 'tool-call-id',
- toolName: 'tool-name',
- step: 1,
- },
- ],
- },
- data: [],
- replaceLastMessage: true,
+ },
+ ],
},
+ });
+ });
+
+ it('should call the update function with the correct arguments', async () => {
+ expect(updateCalls).toMatchSnapshot();
+ });
+
+ it('should call the onFinish function with the correct arguments', async () => {
+ expect(finishCalls).toMatchSnapshot();
+ });
+});
+
+describe('scenario: server-side tool roundtrip with multiple assistant texts', () => {
+ beforeEach(async () => {
+ const stream = createDataProtocolStream([
+ formatDataStreamPart('text', 'I will '),
+ formatDataStreamPart('text', 'use a tool to get the weather in London.'),
+ formatDataStreamPart('tool_call', {
+ toolCallId: 'tool-call-id',
+ toolName: 'tool-name',
+ args: { city: 'London' },
+ }),
+ formatDataStreamPart('tool_result', {
+ toolCallId: 'tool-call-id',
+ result: { weather: 'sunny' },
+ }),
+ formatDataStreamPart('finish_step', {
+ finishReason: 'tool-calls',
+ usage: { completionTokens: 5, promptTokens: 10 },
+ isContinued: false,
+ }),
+ formatDataStreamPart('text', 'The weather in London '),
+ formatDataStreamPart('text', 'is sunny.'),
+ formatDataStreamPart('finish_step', {
+ finishReason: 'stop',
+ usage: { completionTokens: 2, promptTokens: 4 },
+ isContinued: false,
+ }),
+ formatDataStreamPart('finish_message', {
+ finishReason: 'stop',
+ usage: { completionTokens: 7, promptTokens: 14 },
+ }),
]);
+
+ await processChatResponse({
+ stream,
+ update,
+ onFinish,
+ generateId: mockId(),
+ getCurrentDate: vi.fn().mockReturnValue(new Date('2023-01-01')),
+ lastMessage: undefined,
+ });
+ });
+
+ it('should call the update function with the correct arguments', async () => {
+ expect(updateCalls).toMatchSnapshot();
});
it('should call the onFinish function with the correct arguments', async () => {
- expect(finishCalls).toStrictEqual([
- {
- message: {
- id: 'original-id',
- role: 'assistant',
- content: 'The weather in London is sunny.',
- createdAt: new Date('2023-01-02T00:00:00.000Z'),
- toolInvocations: [
- {
- args: {},
- result: { location: 'Berlin' },
- state: 'result',
- step: 0,
- toolCallId: 'tool-call-id-original',
- toolName: 'tool-name-original',
- },
- {
- args: { city: 'London' },
- result: { weather: 'sunny' },
- state: 'result',
- step: 1,
- toolCallId: 'tool-call-id',
- toolName: 'tool-name',
- },
- ],
- },
+ expect(finishCalls).toMatchSnapshot();
+ });
+});
+
+describe('scenario: server-side tool roundtrip with multiple assistant reasoning', () => {
+ beforeEach(async () => {
+ const stream = createDataProtocolStream([
+ formatDataStreamPart('reasoning', 'I will '),
+ formatDataStreamPart(
+ 'reasoning',
+ 'use a tool to get the weather in London.',
+ ),
+ formatDataStreamPart('tool_call', {
+ toolCallId: 'tool-call-id',
+ toolName: 'tool-name',
+ args: { city: 'London' },
+ }),
+ formatDataStreamPart('tool_result', {
+ toolCallId: 'tool-call-id',
+ result: { weather: 'sunny' },
+ }),
+ formatDataStreamPart('finish_step', {
+ finishReason: 'tool-calls',
+ usage: { completionTokens: 5, promptTokens: 10 },
+ isContinued: false,
+ }),
+ formatDataStreamPart('reasoning', 'I know know the weather in London.'),
+ formatDataStreamPart('text', 'The weather in London is sunny.'),
+ formatDataStreamPart('finish_step', {
finishReason: 'stop',
- usage: {
- completionTokens: 7,
- promptTokens: 14,
- totalTokens: 21,
- },
- },
+ usage: { completionTokens: 2, promptTokens: 4 },
+ isContinued: false,
+ }),
+ formatDataStreamPart('finish_message', {
+ finishReason: 'stop',
+ usage: { completionTokens: 7, promptTokens: 14 },
+ }),
]);
+
+ await processChatResponse({
+ stream,
+ update,
+ onFinish,
+ generateId: mockId(),
+ getCurrentDate: vi.fn().mockReturnValue(new Date('2023-01-01')),
+ lastMessage: undefined,
+ });
+ });
+
+ it('should call the update function with the correct arguments', async () => {
+ expect(updateCalls).toMatchSnapshot();
+ });
+
+ it('should call the onFinish function with the correct arguments', async () => {
+ expect(finishCalls).toMatchSnapshot();
});
});
@@ -489,49 +341,11 @@ describe('scenario: server-side continue roundtrip', () => {
});
it('should call the update function with the correct arguments', async () => {
- expect(updateCalls).toStrictEqual([
- {
- message: {
- id: 'id-0',
- revisionId: 'id-1',
- role: 'assistant',
- content: 'The weather in London ',
- createdAt: new Date('2023-01-01T00:00:00.000Z'),
- },
- data: [],
- replaceLastMessage: false,
- },
- {
- message: {
- id: 'id-0',
- revisionId: 'id-2',
- role: 'assistant',
- content: 'The weather in London is sunny.',
- createdAt: new Date('2023-01-01T00:00:00.000Z'),
- },
- data: [],
- replaceLastMessage: false,
- },
- ]);
+ expect(updateCalls).toMatchSnapshot();
});
it('should call the onFinish function with the correct arguments', async () => {
- expect(finishCalls).toStrictEqual([
- {
- message: {
- id: 'id-0',
- role: 'assistant',
- content: 'The weather in London is sunny.',
- createdAt: new Date('2023-01-01T00:00:00.000Z'),
- },
- finishReason: 'stop',
- usage: {
- completionTokens: 7,
- promptTokens: 14,
- totalTokens: 21,
- },
- },
- ]);
+ expect(finishCalls).toMatchSnapshot();
});
});
@@ -567,51 +381,11 @@ describe('scenario: delayed message annotations in onFinish', () => {
});
it('should call the update function with the correct arguments', async () => {
- expect(updateCalls).toStrictEqual([
- {
- message: {
- content: 'text',
- createdAt: new Date('2023-01-01T00:00:00.000Z'),
- id: 'id-0',
- revisionId: 'id-1',
- role: 'assistant',
- },
- data: [],
- replaceLastMessage: false,
- },
- {
- message: {
- content: 'text',
- createdAt: new Date('2023-01-01T00:00:00.000Z'),
- id: 'id-0',
- revisionId: 'id-2',
- role: 'assistant',
- annotations: [{ example: 'annotation' }],
- },
- data: [],
- replaceLastMessage: false,
- },
- ]);
+ expect(updateCalls).toMatchSnapshot();
});
it('should call the onFinish function with the correct arguments', async () => {
- expect(finishCalls).toStrictEqual([
- {
- message: {
- content: 'text',
- createdAt: new Date('2023-01-01T00:00:00.000Z'),
- id: 'id-0',
- role: 'assistant',
- annotations: [{ example: 'annotation' }],
- },
- finishReason: 'stop',
- usage: {
- completionTokens: 5,
- promptTokens: 10,
- totalTokens: 15,
- },
- },
- ]);
+ expect(finishCalls).toMatchSnapshot();
});
});
@@ -644,76 +418,11 @@ describe('scenario: message annotations in onChunk', () => {
});
it('should call the update function with the correct arguments', async () => {
- expect(updateCalls).toStrictEqual([
- {
- message: {
- content: '',
- createdAt: new Date('2023-01-01T00:00:00.000Z'),
- id: 'id-0',
- revisionId: 'id-1',
- role: 'assistant',
- annotations: ['annotation1'],
- },
- data: [],
- replaceLastMessage: false,
- },
- {
- message: {
- content: 't1',
- createdAt: new Date('2023-01-01T00:00:00.000Z'),
- id: 'id-0',
- revisionId: 'id-2',
- role: 'assistant',
- annotations: ['annotation1'],
- },
- data: [],
- replaceLastMessage: false,
- },
- {
- message: {
- content: 't1',
- createdAt: new Date('2023-01-01T00:00:00.000Z'),
- id: 'id-0',
- revisionId: 'id-3',
- role: 'assistant',
- annotations: ['annotation1', 'annotation2'],
- },
- data: [],
- replaceLastMessage: false,
- },
- {
- message: {
- content: 't1t2',
- createdAt: new Date('2023-01-01T00:00:00.000Z'),
- id: 'id-0',
- revisionId: 'id-4',
- role: 'assistant',
- annotations: ['annotation1', 'annotation2'],
- },
- data: [],
- replaceLastMessage: false,
- },
- ]);
+ expect(updateCalls).toMatchSnapshot();
});
it('should call the onFinish function with the correct arguments', async () => {
- expect(finishCalls).toStrictEqual([
- {
- message: {
- content: 't1t2',
- createdAt: new Date('2023-01-01T00:00:00.000Z'),
- id: 'id-0',
- role: 'assistant',
- annotations: ['annotation1', 'annotation2'],
- },
- finishReason: 'stop',
- usage: {
- completionTokens: 5,
- promptTokens: 10,
- totalTokens: 15,
- },
- },
- ]);
+ expect(finishCalls).toMatchSnapshot();
});
});
@@ -745,57 +454,17 @@ describe('scenario: message annotations with existing assistant lastMessage', ()
createdAt: new Date('2023-01-02T00:00:00.000Z'),
content: '',
annotations: ['annotation0'],
+ parts: [],
},
});
});
it('should call the update function with the correct arguments', async () => {
- expect(updateCalls).toStrictEqual([
- {
- message: {
- content: '',
- createdAt: new Date('2023-01-02T00:00:00.000Z'),
- id: 'original-id',
- revisionId: 'id-0',
- role: 'assistant',
- annotations: ['annotation0', 'annotation1'],
- },
- data: [],
- replaceLastMessage: true,
- },
- {
- message: {
- content: 't1',
- createdAt: new Date('2023-01-02T00:00:00.000Z'),
- id: 'original-id',
- revisionId: 'id-1',
- role: 'assistant',
- annotations: ['annotation0', 'annotation1'],
- },
- data: [],
- replaceLastMessage: true,
- },
- ]);
+ expect(updateCalls).toMatchSnapshot();
});
it('should call the onFinish function with the correct arguments', async () => {
- expect(finishCalls).toStrictEqual([
- {
- message: {
- content: 't1',
- createdAt: new Date('2023-01-02T00:00:00.000Z'),
- id: 'original-id',
- role: 'assistant',
- annotations: ['annotation0', 'annotation1'],
- },
- finishReason: 'stop',
- usage: {
- completionTokens: 5,
- promptTokens: 10,
- totalTokens: 15,
- },
- },
- ]);
+ expect(finishCalls).toMatchSnapshot();
});
});
@@ -845,148 +514,11 @@ describe('scenario: tool call streaming', () => {
});
it('should call the update function with the correct arguments', async () => {
- expect(updateCalls).toStrictEqual([
- {
- message: {
- content: '',
- createdAt: new Date('2023-01-01T00:00:00.000Z'),
- id: 'id-0',
- revisionId: 'id-1',
- role: 'assistant',
- toolInvocations: [
- {
- state: 'partial-call',
- step: 0,
- toolCallId: 'tool-call-0',
- toolName: 'test-tool',
- args: undefined,
- },
- ],
- },
- data: [],
- replaceLastMessage: false,
- },
- {
- message: {
- content: '',
- createdAt: new Date('2023-01-01T00:00:00.000Z'),
- id: 'id-0',
- revisionId: 'id-2',
- role: 'assistant',
- toolInvocations: [
- {
- args: {
- testArg: 't',
- },
- state: 'partial-call',
- step: 0,
- toolCallId: 'tool-call-0',
- toolName: 'test-tool',
- },
- ],
- },
- data: [],
- replaceLastMessage: false,
- },
- {
- message: {
- content: '',
- createdAt: new Date('2023-01-01T00:00:00.000Z'),
- id: 'id-0',
- revisionId: 'id-3',
- role: 'assistant',
- toolInvocations: [
- {
- args: {
- testArg: 'test-value',
- },
- state: 'partial-call',
- step: 0,
- toolCallId: 'tool-call-0',
- toolName: 'test-tool',
- },
- ],
- },
- data: [],
- replaceLastMessage: false,
- },
- {
- message: {
- content: '',
- createdAt: new Date('2023-01-01T00:00:00.000Z'),
- id: 'id-0',
- revisionId: 'id-4',
- role: 'assistant',
- toolInvocations: [
- {
- args: {
- testArg: 'test-value',
- },
- state: 'call',
- toolCallId: 'tool-call-0',
- toolName: 'test-tool',
- step: 0,
- },
- ],
- },
- data: [],
- replaceLastMessage: false,
- },
- {
- message: {
- content: '',
- createdAt: new Date('2023-01-01T00:00:00.000Z'),
- id: 'id-0',
- revisionId: 'id-5',
- role: 'assistant',
- toolInvocations: [
- {
- args: {
- testArg: 'test-value',
- },
- result: 'test-result',
- state: 'result',
- toolCallId: 'tool-call-0',
- toolName: 'test-tool',
- step: 0,
- },
- ],
- },
- data: [],
- replaceLastMessage: false,
- },
- ]);
+ expect(updateCalls).toMatchSnapshot();
});
it('should call the onFinish function with the correct arguments', async () => {
- expect(finishCalls).toStrictEqual([
- {
- message: {
- content: '',
- createdAt: new Date('2023-01-01T00:00:00.000Z'),
- id: 'id-0',
- role: 'assistant',
- toolInvocations: [
- {
- args: {
- testArg: 'test-value',
- },
- result: 'test-result',
- state: 'result',
- toolCallId: 'tool-call-0',
- toolName: 'test-tool',
- step: 0,
- },
- ],
- },
- finishReason: 'stop',
- usage: {
- completionTokens: 5,
- promptTokens: 10,
- totalTokens: 15,
- },
- },
- ]);
+ expect(finishCalls).toMatchSnapshot();
});
});
@@ -1018,49 +550,11 @@ describe('scenario: server provides message ids', () => {
});
it('should call the update function with the correct arguments', async () => {
- expect(updateCalls).toStrictEqual([
- {
- message: {
- content: 'Hello, ',
- createdAt: new Date('2023-01-01T00:00:00.000Z'),
- id: 'step_123',
- revisionId: 'id-1',
- role: 'assistant',
- },
- data: [],
- replaceLastMessage: false,
- },
- {
- message: {
- content: 'Hello, world!',
- createdAt: new Date('2023-01-01T00:00:00.000Z'),
- id: 'step_123',
- revisionId: 'id-2',
- role: 'assistant',
- },
- data: [],
- replaceLastMessage: false,
- },
- ]);
+ expect(updateCalls).toMatchSnapshot();
});
it('should call the onFinish function with the correct arguments', async () => {
- expect(finishCalls).toStrictEqual([
- {
- message: {
- content: 'Hello, world!',
- createdAt: new Date('2023-01-01T00:00:00.000Z'),
- id: 'step_123',
- role: 'assistant',
- },
- finishReason: 'stop',
- usage: {
- completionTokens: 5,
- promptTokens: 10,
- totalTokens: 15,
- },
- },
- ]);
+ expect(finishCalls).toMatchSnapshot();
});
});
@@ -1106,3 +600,42 @@ describe('scenario: server provides reasoning', () => {
expect(finishCalls).toMatchSnapshot();
});
});
+
+describe('scenario: onToolCall is executed', () => {
+ beforeEach(async () => {
+ const stream = createDataProtocolStream([
+ formatDataStreamPart('tool_call', {
+ toolCallId: 'tool-call-id',
+ toolName: 'tool-name',
+ args: { city: 'London' },
+ }),
+ formatDataStreamPart('finish_step', {
+ finishReason: 'tool-calls',
+ usage: { completionTokens: 5, promptTokens: 10 },
+ isContinued: false,
+ }),
+ formatDataStreamPart('finish_message', {
+ finishReason: 'stop',
+ usage: { completionTokens: 5, promptTokens: 10 },
+ }),
+ ]);
+
+ await processChatResponse({
+ stream,
+ update,
+ onFinish,
+ generateId: mockId(),
+ getCurrentDate: vi.fn().mockReturnValue(new Date('2023-01-01')),
+ lastMessage: undefined,
+ onToolCall: vi.fn().mockResolvedValue('test-result'),
+ });
+ });
+
+ it('should call the update function with the correct arguments', async () => {
+ expect(updateCalls).toMatchSnapshot();
+ });
+
+ it('should call the onFinish function with the correct arguments', async () => {
+ expect(finishCalls).toMatchSnapshot();
+ });
+});
diff --git a/packages/ui-utils/src/process-chat-response.ts b/packages/ui-utils/src/process-chat-response.ts
index e1ff8bb533bf..32745c4ce253 100644
--- a/packages/ui-utils/src/process-chat-response.ts
+++ b/packages/ui-utils/src/process-chat-response.ts
@@ -1,7 +1,15 @@
import { generateId as generateIdFunction } from '@ai-sdk/provider-utils';
import { parsePartialJson } from './parse-partial-json';
import { processDataStream } from './process-data-stream';
-import type { JSONValue, Message, UseChatOptions } from './types';
+import type {
+ JSONValue,
+ ReasoningUIPart,
+ TextUIPart,
+ ToolInvocation,
+ ToolInvocationUIPart,
+ UIMessage,
+ UseChatOptions,
+} from './types';
import { LanguageModelV1FinishReason } from '@ai-sdk/provider';
import {
calculateLanguageModelUsage,
@@ -19,19 +27,19 @@ export async function processChatResponse({
}: {
stream: ReadableStream;
update: (options: {
- message: Message;
+ message: UIMessage;
data: JSONValue[] | undefined;
replaceLastMessage: boolean;
}) => void;
onToolCall?: UseChatOptions['onToolCall'];
onFinish?: (options: {
- message: Message | undefined;
+ message: UIMessage | undefined;
finishReason: LanguageModelV1FinishReason;
usage: LanguageModelUsage;
}) => void;
generateId?: () => string;
getCurrentDate?: () => Date;
- lastMessage: Message | undefined;
+ lastMessage: UIMessage | undefined;
}) {
const replaceLastMessage = lastMessage?.role === 'assistant';
let step = replaceLastMessage
@@ -42,15 +50,39 @@ export async function processChatResponse({
}, 0) ?? 0)
: 0;
- const message: Message = replaceLastMessage
+ const message: UIMessage = replaceLastMessage
? structuredClone(lastMessage)
: {
id: generateId(),
createdAt: getCurrentDate(),
role: 'assistant',
content: '',
+ parts: [],
};
+ let currentTextPart: TextUIPart | undefined = undefined;
+ let currentReasoningPart: ReasoningUIPart | undefined = undefined;
+
+ function updateToolInvocationPart(
+ toolCallId: string,
+ invocation: ToolInvocation,
+ ) {
+ const part = message.parts.find(
+ part =>
+ part.type === 'tool-invocation' &&
+ part.toolInvocation.toolCallId === toolCallId,
+ ) as ToolInvocationUIPart | undefined;
+
+ if (part != null) {
+ part.toolInvocation = invocation;
+ } else {
+ message.parts.push({
+ type: 'tool-invocation',
+ toolInvocation: invocation,
+ });
+ }
+ }
+
const data: JSONValue[] = [];
// keep list of current message annotations for message
@@ -91,7 +123,7 @@ export async function processChatResponse({
// is updated with SWR (without it, the changes get stuck in SWR and are not
// forwarded to rendering):
revisionId: generateId(),
- } as Message;
+ } as UIMessage;
update({
message: copiedMessage,
@@ -103,10 +135,30 @@ export async function processChatResponse({
await processDataStream({
stream,
onTextPart(value) {
+ if (currentTextPart == null) {
+ currentTextPart = {
+ type: 'text',
+ text: value,
+ };
+ message.parts.push(currentTextPart);
+ } else {
+ currentTextPart.text += value;
+ }
+
message.content += value;
execUpdate();
},
onReasoningPart(value) {
+ if (currentReasoningPart == null) {
+ currentReasoningPart = {
+ type: 'reasoning',
+ reasoning: value,
+ };
+ message.parts.push(currentReasoningPart);
+ } else {
+ currentReasoningPart.reasoning += value;
+ }
+
message.reasoning = (message.reasoning ?? '') + value;
execUpdate();
},
@@ -123,13 +175,17 @@ export async function processChatResponse({
index: message.toolInvocations.length,
};
- message.toolInvocations.push({
+ const invocation = {
state: 'partial-call',
step,
toolCallId: value.toolCallId,
toolName: value.toolName,
args: undefined,
- });
+ } as const;
+
+ message.toolInvocations.push(invocation);
+
+ updateToolInvocationPart(value.toolCallId, invocation);
execUpdate();
},
@@ -140,49 +196,59 @@ export async function processChatResponse({
const { value: partialArgs } = parsePartialJson(partialToolCall.text);
- message.toolInvocations![partialToolCall.index] = {
+ const invocation = {
state: 'partial-call',
step: partialToolCall.step,
toolCallId: value.toolCallId,
toolName: partialToolCall.toolName,
args: partialArgs,
- };
+ } as const;
+
+ message.toolInvocations![partialToolCall.index] = invocation;
+
+ updateToolInvocationPart(value.toolCallId, invocation);
execUpdate();
},
async onToolCallPart(value) {
+ const invocation = {
+ state: 'call',
+ step,
+ ...value,
+ } as const;
+
if (partialToolCalls[value.toolCallId] != null) {
// change the partial tool call to a full tool call
- message.toolInvocations![partialToolCalls[value.toolCallId].index] = {
- state: 'call',
- step,
- ...value,
- };
+ message.toolInvocations![partialToolCalls[value.toolCallId].index] =
+ invocation;
} else {
if (message.toolInvocations == null) {
message.toolInvocations = [];
}
- message.toolInvocations.push({
- state: 'call',
- step,
- ...value,
- });
+ message.toolInvocations.push(invocation);
}
+ updateToolInvocationPart(value.toolCallId, invocation);
+
// invoke the onToolCall callback if it exists. This is blocking.
// In the future we should make this non-blocking, which
// requires additional state management for error handling etc.
if (onToolCall) {
const result = await onToolCall({ toolCall: value });
if (result != null) {
- // store the result in the tool invocation
- message.toolInvocations![message.toolInvocations!.length - 1] = {
+ const invocation = {
state: 'result',
step,
...value,
result,
- };
+ } as const;
+
+ // store the result in the tool invocation
+ message.toolInvocations![message.toolInvocations!.length - 1] =
+ invocation;
+
+ updateToolInvocationPart(value.toolCallId, invocation);
}
}
@@ -207,11 +273,15 @@ export async function processChatResponse({
);
}
- toolInvocations[toolInvocationIndex] = {
+ const invocation = {
...toolInvocations[toolInvocationIndex],
state: 'result' as const,
...value,
- };
+ } as const;
+
+ toolInvocations[toolInvocationIndex] = invocation;
+
+ updateToolInvocationPart(value.toolCallId, invocation);
execUpdate();
},
@@ -230,6 +300,10 @@ export async function processChatResponse({
},
onFinishStepPart(value) {
step += 1;
+
+ // reset the current text and reasoning parts
+ currentTextPart = value.isContinued ? currentTextPart : undefined;
+ currentReasoningPart = undefined;
},
onStartStepPart(value) {
// keep message id stable when we are updating an existing message:
diff --git a/packages/ui-utils/src/process-chat-text-response.test.ts b/packages/ui-utils/src/process-chat-text-response.test.ts
new file mode 100644
index 000000000000..8a13ae201a88
--- /dev/null
+++ b/packages/ui-utils/src/process-chat-text-response.test.ts
@@ -0,0 +1,110 @@
+import { convertArrayToReadableStream } from '@ai-sdk/provider-utils/test';
+import { beforeEach, describe, expect, it } from 'vitest';
+import { processChatTextResponse } from './process-chat-text-response';
+import { Message } from './types';
+
+function createTextStream(chunks: string[]): ReadableStream {
+ return convertArrayToReadableStream(chunks).pipeThrough(
+ new TextEncoderStream(),
+ );
+}
+
+let updateCalls: Array<{
+ message: Message;
+ data: any[] | undefined;
+ replaceLastMessage: boolean;
+}> = [];
+
+let finishCallMessages: Message[] = [];
+
+const update = (options: {
+ message: Message;
+ data: any[] | undefined;
+ replaceLastMessage: boolean;
+}) => {
+ // clone to preserve the original object
+ updateCalls.push(structuredClone(options));
+};
+
+const onFinish = (message: Message) => {
+ // store the final message
+ finishCallMessages.push(structuredClone(message));
+};
+
+function mockId(): string {
+ // a simple predictable ID generator
+ return 'test-id';
+}
+
+beforeEach(() => {
+ updateCalls = [];
+ finishCallMessages = [];
+});
+
+describe('processChatTextResponse', () => {
+ describe('scenario: simple text response', () => {
+ beforeEach(async () => {
+ const stream = createTextStream(['Hello, ', 'world!']);
+
+ await processChatTextResponse({
+ stream,
+ update,
+ onFinish,
+ generateId: () => mockId(),
+ getCurrentDate: vi.fn().mockReturnValue(new Date('2023-01-01')),
+ });
+ });
+
+ it('should call the update function with correct arguments for each chunk', () => {
+ expect(updateCalls).toMatchSnapshot();
+ });
+
+ it('should call the onFinish function after the stream ends', () => {
+ expect(finishCallMessages).toMatchSnapshot();
+ });
+ });
+
+ describe('scenario: no text chunks', () => {
+ beforeEach(async () => {
+ const stream = createTextStream([]);
+
+ await processChatTextResponse({
+ stream,
+ update,
+ onFinish,
+ generateId: () => mockId(),
+ getCurrentDate: vi.fn().mockReturnValue(new Date('2023-01-01')),
+ });
+ });
+
+ it('should call the update function with correct arguments for each chunk', () => {
+ expect(updateCalls).toMatchSnapshot();
+ });
+
+ it('should call the onFinish function after the stream ends', () => {
+ expect(finishCallMessages).toMatchSnapshot();
+ });
+ });
+
+ describe('scenario: multiple short chunks', () => {
+ beforeEach(async () => {
+ const stream = createTextStream(['A', 'B', 'C', 'D', 'E']);
+
+ await processChatTextResponse({
+ stream,
+ update,
+ onFinish,
+ generateId: () => mockId(),
+ getCurrentDate: vi.fn().mockReturnValue(new Date('2023-01-01')),
+ });
+ });
+
+ it('should call the update function with correct arguments for each chunk', () => {
+ expect(updateCalls).toMatchSnapshot();
+ });
+
+ it('should call the onFinish function after the stream ends', () => {
+ expect(finishCallMessages).toMatchSnapshot();
+ });
+ });
+});
diff --git a/packages/ui-utils/src/process-chat-text-response.ts b/packages/ui-utils/src/process-chat-text-response.ts
new file mode 100644
index 000000000000..d673f46d8e1b
--- /dev/null
+++ b/packages/ui-utils/src/process-chat-text-response.ts
@@ -0,0 +1,53 @@
+import { JSONValue } from '@ai-sdk/provider';
+import { generateId as generateIdFunction } from '@ai-sdk/provider-utils';
+import { processTextStream } from './process-text-stream';
+import { TextUIPart, UIMessage, UseChatOptions } from './types';
+
+export async function processChatTextResponse({
+ stream,
+ update,
+ onFinish,
+ getCurrentDate = () => new Date(),
+ generateId = generateIdFunction,
+}: {
+ stream: ReadableStream;
+ update: (options: {
+ message: UIMessage;
+ data: JSONValue[] | undefined;
+ replaceLastMessage: boolean;
+ }) => void;
+ onFinish: UseChatOptions['onFinish'];
+ getCurrentDate?: () => Date;
+ generateId?: () => string;
+}) {
+ const textPart: TextUIPart = { type: 'text', text: '' };
+
+ const resultMessage: UIMessage = {
+ id: generateId(),
+ createdAt: getCurrentDate(),
+ role: 'assistant' as const,
+ content: '',
+ parts: [textPart],
+ };
+
+ await processTextStream({
+ stream,
+ onTextPart: chunk => {
+ resultMessage.content += chunk;
+ textPart.text += chunk;
+
+ // note: creating a new message object is required for Solid.js streaming
+ update({
+ message: { ...resultMessage },
+ data: [],
+ replaceLastMessage: false,
+ });
+ },
+ });
+
+ // in text mode, we don't have usage information or finish reason:
+ onFinish?.(resultMessage, {
+ usage: { completionTokens: NaN, promptTokens: NaN, totalTokens: NaN },
+ finishReason: 'unknown',
+ });
+}
diff --git a/packages/ui-utils/src/should-resubmit-messages.ts b/packages/ui-utils/src/should-resubmit-messages.ts
new file mode 100644
index 000000000000..fddf05f38432
--- /dev/null
+++ b/packages/ui-utils/src/should-resubmit-messages.ts
@@ -0,0 +1,64 @@
+import { extractMaxToolInvocationStep } from './extract-max-tool-invocation-step';
+import { UIMessage } from './types';
+
+export function shouldResubmitMessages({
+ originalMaxToolInvocationStep,
+ originalMessageCount,
+ maxSteps,
+ messages,
+}: {
+ originalMaxToolInvocationStep: number | undefined;
+ originalMessageCount: number;
+ maxSteps: number;
+ messages: UIMessage[];
+}) {
+ const lastMessage = messages[messages.length - 1];
+ return (
+ // check if the feature is enabled:
+ maxSteps > 1 &&
+ // ensure there is a last message:
+ lastMessage != null &&
+ // ensure we actually have new steps (to prevent infinite loops in case of errors):
+ (messages.length > originalMessageCount ||
+ extractMaxToolInvocationStep(lastMessage.toolInvocations) !==
+ originalMaxToolInvocationStep) &&
+ // check that next step is possible:
+ isAssistantMessageWithCompletedToolCalls(lastMessage) &&
+ // check that assistant has not answered yet:
+ !isLastToolInvocationFollowedByText(lastMessage) &&
+ // limit the number of automatic steps:
+ (extractMaxToolInvocationStep(lastMessage.toolInvocations) ?? 0) < maxSteps
+ );
+}
+
+function isLastToolInvocationFollowedByText(message: UIMessage) {
+ let isLastToolInvocationFollowedByText = false;
+
+ message.parts.forEach(part => {
+ if (part.type === 'text') {
+ isLastToolInvocationFollowedByText = true;
+ }
+ if (part.type === 'tool-invocation') {
+ isLastToolInvocationFollowedByText = false;
+ }
+ });
+ return isLastToolInvocationFollowedByText;
+}
+
+/**
+Check if the message is an assistant message with completed tool calls.
+The message must have at least one tool invocation and all tool invocations
+must have a result.
+ */
+export function isAssistantMessageWithCompletedToolCalls(
+ message: UIMessage,
+): message is UIMessage & {
+ role: 'assistant';
+} {
+ return (
+ message.role === 'assistant' &&
+ message.parts
+ .filter(part => part.type === 'tool-invocation')
+ .every(part => 'result' in part.toolInvocation)
+ );
+}
diff --git a/packages/ui-utils/src/test/create-data-protocol-stream.ts b/packages/ui-utils/src/test/create-data-protocol-stream.ts
index 54ea4ce7d06a..f58aaec05906 100644
--- a/packages/ui-utils/src/test/create-data-protocol-stream.ts
+++ b/packages/ui-utils/src/test/create-data-protocol-stream.ts
@@ -4,8 +4,7 @@ import { DataStreamString } from '../data-stream-parts';
export function createDataProtocolStream(
dataPartTexts: DataStreamString[],
): ReadableStream {
- const encoder = new TextEncoder();
- return convertArrayToReadableStream(
- dataPartTexts.map(part => encoder.encode(part)),
+ return convertArrayToReadableStream(dataPartTexts).pipeThrough(
+ new TextEncoderStream(),
);
}
diff --git a/packages/ui-utils/src/types.ts b/packages/ui-utils/src/types.ts
index d1108f87f8c0..285325cf683e 100644
--- a/packages/ui-utils/src/types.ts
+++ b/packages/ui-utils/src/types.ts
@@ -56,12 +56,14 @@ The timestamp of the message.
createdAt?: Date;
/**
-Text content of the message.
+Text content of the message. Use parts when possible.
*/
content: string;
/**
Reasoning for the message.
+
+@deprecated Use `parts` instead.
*/
reasoning?: string;
@@ -70,8 +72,16 @@ Reasoning for the message.
*/
experimental_attachments?: Attachment[];
+ /**
+The 'data' role is deprecated.
+ */
role: 'system' | 'user' | 'assistant' | 'data';
+ /**
+For data messages.
+
+@deprecated Data messages will be removed.
+ */
data?: JSONValue;
/**
@@ -82,10 +92,67 @@ Reasoning for the message.
/**
Tool invocations (that can be tool calls or tool results, depending on whether or not the invocation has finished)
that the assistant made as part of this message.
+
+@deprecated Use `parts` instead.
*/
toolInvocations?: Array<ToolInvocation>;
+
+ /**
+ * The parts of the message. Use this for rendering the message in the UI.
+ *
+ * Assistant messages can have text, reasoning and tool invocation parts.
+ * User messages can have text parts.
+ */
+ // note: optional on the Message type (which serves as input)
+  parts?: Array<TextUIPart | ReasoningUIPart | ToolInvocationUIPart>;
}
+export type UIMessage = Message & {
+ /**
+ * The parts of the message. Use this for rendering the message in the UI.
+ *
+ * Assistant messages can have text, reasoning and tool invocation parts.
+ * User messages can have text parts.
+ */
+  parts: Array<TextUIPart | ReasoningUIPart | ToolInvocationUIPart>;
+};
+
+/**
+ * A text part of a message.
+ */
+export type TextUIPart = {
+ type: 'text';
+
+ /**
+ * The text content.
+ */
+ text: string;
+};
+
+/**
+ * A reasoning part of a message.
+ */
+export type ReasoningUIPart = {
+ type: 'reasoning';
+
+ /**
+ * The reasoning text.
+ */
+ reasoning: string;
+};
+
+/**
+ * A tool invocation part of a message.
+ */
+export type ToolInvocationUIPart = {
+ type: 'tool-invocation';
+
+ /**
+ * The tool invocation.
+ */
+ toolInvocation: ToolInvocation;
+};
+
export type CreateMessage = Omit<Message, 'id'> & {
id?: Message['id'];
};
diff --git a/packages/ui-utils/src/update-tool-call-result.ts b/packages/ui-utils/src/update-tool-call-result.ts
new file mode 100644
index 000000000000..3421b52015ec
--- /dev/null
+++ b/packages/ui-utils/src/update-tool-call-result.ts
@@ -0,0 +1,45 @@
+import { ToolInvocationUIPart, UIMessage } from './types';
+
+/**
+ * Updates the result of a specific tool invocation in the last message of the given messages array.
+ *
+ * @param {object} params - The parameters object.
+ * @param {UIMessage[]} params.messages - An array of messages, from which the last one is updated.
+ * @param {string} params.toolCallId - The unique identifier for the tool invocation to update.
+ * @param {unknown} params.toolResult - The result object to attach to the tool invocation.
+ * @returns {void} This function does not return anything.
+ */
+export function updateToolCallResult({
+ messages,
+ toolCallId,
+ toolResult: result,
+}: {
+ messages: UIMessage[];
+ toolCallId: string;
+ toolResult: unknown;
+}) {
+ const lastMessage = messages[messages.length - 1];
+
+ const invocationPart = lastMessage.parts.find(
+ (part): part is ToolInvocationUIPart =>
+ part.type === 'tool-invocation' &&
+ part.toolInvocation.toolCallId === toolCallId,
+ );
+
+ if (invocationPart == null) {
+ return;
+ }
+
+ const toolResult = {
+ ...invocationPart.toolInvocation,
+ state: 'result' as const,
+ result,
+ };
+
+ invocationPart.toolInvocation = toolResult;
+
+ lastMessage.toolInvocations = lastMessage.toolInvocations?.map(
+ toolInvocation =>
+ toolInvocation.toolCallId === toolCallId ? toolResult : toolInvocation,
+ );
+}
diff --git a/packages/vue/src/use-assistant.ts b/packages/vue/src/use-assistant.ts
index c79af9b1b0f2..d1b513c1a641 100644
--- a/packages/vue/src/use-assistant.ts
+++ b/packages/vue/src/use-assistant.ts
@@ -189,6 +189,7 @@ export function useAssistant({
id: value.id,
content: value.content[0].text.value,
role: value.role,
+ parts: [],
},
];
},
@@ -220,6 +221,7 @@ export function useAssistant({
role: 'data',
content: '',
data: value.data,
+ parts: [],
},
]);
},
@@ -261,6 +263,7 @@ export function useAssistant({
{
role: 'user',
content: input.value,
+ parts: [],
},
requestOptions,
);
diff --git a/packages/vue/src/use-chat.ts b/packages/vue/src/use-chat.ts
index a71d8c9fda08..421faec87f37 100644
--- a/packages/vue/src/use-chat.ts
+++ b/packages/vue/src/use-chat.ts
@@ -1,26 +1,31 @@
import type {
- ChatRequest,
ChatRequestOptions,
CreateMessage,
JSONValue,
Message,
+ UIMessage,
UseChatOptions,
} from '@ai-sdk/ui-utils';
import {
callChatApi,
extractMaxToolInvocationStep,
+ fillMessageParts,
generateId as generateIdFunc,
+ getMessageParts,
+ isAssistantMessageWithCompletedToolCalls,
prepareAttachmentsForRequest,
+ shouldResubmitMessages,
+ updateToolCallResult,
} from '@ai-sdk/ui-utils';
import swrv from 'swrv';
import type { Ref } from 'vue';
-import { ref, unref } from 'vue';
+import { ref, toRaw, unref } from 'vue';
-export type { CreateMessage, Message, UseChatOptions };
+export type { CreateMessage, Message, UIMessage, UseChatOptions };
export type UseChatHelpers = {
/** Current messages in the chat */
-  messages: Ref<Message[]>;
+  messages: Ref<UIMessage[]>;
/** The error object of the API request */
  error: Ref<undefined | Error>;
/**
@@ -85,7 +90,7 @@ export type UseChatHelpers = {
// @ts-expect-error - some issues with the default export of useSWRV
const useSWRV = (swrv.default as typeof import('swrv')['default']) || swrv;
-const store: Record<string, Message[] | undefined> = {};
+const store: Record<string, UIMessage[] | undefined> = {};
export function useChat(
{
@@ -105,7 +110,7 @@ export function useChat(
onToolCall,
fetch,
keepLastMessageOnError = true,
- maxSteps,
+ maxSteps = 1,
}: UseChatOptions & {
/**
* Maximum number of sequential LLM calls (steps), e.g. when you use tool calls. Must be at least 1.
@@ -121,9 +126,9 @@ export function useChat(
const chatId = id ?? generateId();
const key = `${api}|${chatId}`;
-  const { data: messagesData, mutate: originalMutate } = useSWRV<Message[]>(
+  const { data: messagesData, mutate: originalMutate } = useSWRV<UIMessage[]>(
key,
- () => store[key] || initialMessages,
+ () => store[key] ?? fillMessageParts(initialMessages),
);
  const { data: isLoading, mutate: mutateLoading } = useSWRV<boolean>(
@@ -134,15 +139,15 @@ export function useChat(
isLoading.value ??= false;
// Force the `data` to be `initialMessages` if it's `undefined`.
- messagesData.value ??= initialMessages;
+ messagesData.value ??= fillMessageParts(initialMessages);
- const mutate = (data?: Message[]) => {
+ const mutate = (data?: UIMessage[]) => {
store[key] = data;
return originalMutate();
};
// Because of the `initialData` option, the `data` will never be `undefined`.
-  const messages = messagesData as Ref<Message[]>;
+  const messages = messagesData as Ref<UIMessage[]>;
  const error = ref<undefined | Error>(undefined);
// cannot use JSONValue[] in ref because of infinite Typescript recursion:
@@ -167,21 +172,15 @@ export function useChat(
// Do an optimistic update to the chat state to show the updated messages
// immediately.
- const previousMessages = messagesSnapshot;
- mutate(messagesSnapshot);
-
- const chatRequest: ChatRequest = {
- messages: messagesSnapshot,
- body,
- headers,
- data,
- };
+ const previousMessages = fillMessageParts(messagesSnapshot);
+ const chatMessages = previousMessages;
+ mutate(chatMessages);
const existingData = (streamData.value ?? []) as JSONValue[];
const constructedMessagesPayload = sendExtraMessageFields
- ? chatRequest.messages
- : chatRequest.messages.map(
+ ? chatMessages
+ : chatMessages.map(
({
role,
content,
@@ -189,6 +188,7 @@ export function useChat(
data,
annotations,
toolInvocations,
+ parts,
}) => ({
role,
content,
@@ -198,6 +198,7 @@ export function useChat(
...(data !== undefined && { data }),
...(annotations !== undefined && { annotations }),
...(toolInvocations !== undefined && { toolInvocations }),
+ ...(parts !== undefined && { parts }),
}),
);
@@ -206,7 +207,7 @@ export function useChat(
body: {
id: chatId,
messages: constructedMessagesPayload,
- data: chatRequest.data,
+ data,
...unref(metadataBody), // Use unref to unwrap the ref value
...body,
},
@@ -221,8 +222,8 @@ export function useChat(
onUpdate({ message, data, replaceLastMessage }) {
mutate([
...(replaceLastMessage
- ? chatRequest.messages.slice(0, chatRequest.messages.length - 1)
- : chatRequest.messages),
+ ? chatMessages.slice(0, chatMessages.length - 1)
+ : chatMessages),
message,
]);
if (data?.length) {
@@ -239,7 +240,8 @@ export function useChat(
generateId,
onToolCall,
fetch,
- lastMessage: chatRequest.messages[chatRequest.messages.length - 1],
+ // enabled use of structured clone in processChatResponse:
+ lastMessage: recursiveToRaw(chatMessages[chatMessages.length - 1]),
});
} catch (err) {
// Ignore abort errors as they are expected.
@@ -259,24 +261,13 @@ export function useChat(
}
// auto-submit when all tool calls in the last assistant message have results:
- const lastMessage = messages.value[messages.value.length - 1];
if (
- // ensure there is a last message:
- lastMessage != null &&
- // ensure we actually have new messages (to prevent infinite loops in case of errors):
- (messages.value.length > messageCount ||
- extractMaxToolInvocationStep(lastMessage.toolInvocations) !==
- maxStep) &&
- // check if the feature is enabled:
- maxSteps &&
- maxSteps > 1 &&
- // check that next step is possible:
- isAssistantMessageWithCompletedToolCalls(lastMessage) &&
- // check that assistant has not answered yet:
- !lastMessage.content && // empty string or undefined
- // limit the number of automatic steps:
- (extractMaxToolInvocationStep(lastMessage.toolInvocations) ?? 0) <
- maxSteps
+ shouldResubmitMessages({
+ originalMaxToolInvocationStep: maxStep,
+ originalMessageCount: messageCount,
+ maxSteps,
+ messages: messages.value,
+ })
) {
await triggerRequest(messages.value);
}
@@ -294,6 +285,7 @@ export function useChat(
createdAt: message.createdAt ?? new Date(),
experimental_attachments:
attachmentsForRequest.length > 0 ? attachmentsForRequest : undefined,
+ parts: getMessageParts(message),
}),
options,
);
@@ -325,7 +317,7 @@ export function useChat(
messagesArg = messagesArg(messages.value);
}
- mutate(messagesArg);
+ mutate(fillMessageParts(messagesArg));
};
const setData = (
@@ -365,6 +357,7 @@ export function useChat(
role: 'user',
experimental_attachments:
attachmentsForRequest.length > 0 ? attachmentsForRequest : undefined,
+ parts: [{ type: 'text', text: inputValue }],
}),
options,
);
@@ -377,35 +370,22 @@ export function useChat(
result,
}: {
toolCallId: string;
- result: any;
+ result: unknown;
}) => {
- const updatedMessages = messages.value.map((message, index, arr) =>
- // update the tool calls in the last assistant message:
- index === arr.length - 1 &&
- message.role === 'assistant' &&
- message.toolInvocations
- ? {
- ...message,
- toolInvocations: message.toolInvocations.map(toolInvocation =>
- toolInvocation.toolCallId === toolCallId
- ? {
- ...toolInvocation,
- result,
- state: 'result' as const,
- }
- : toolInvocation,
- ),
- }
- : message,
- );
+ const currentMessages = messages.value;
- mutate(updatedMessages);
+ updateToolCallResult({
+ messages: currentMessages,
+ toolCallId,
+ toolResult: result,
+ });
- // auto-submit when all tool calls in the last assistant message have results:
- const lastMessage = updatedMessages[updatedMessages.length - 1];
+ mutate(currentMessages);
+ // auto-submit when all tool calls in the last assistant message have results:
+ const lastMessage = currentMessages[currentMessages.length - 1];
if (isAssistantMessageWithCompletedToolCalls(lastMessage)) {
- triggerRequest(updatedMessages);
+ triggerRequest(currentMessages);
}
};
@@ -426,16 +406,17 @@ export function useChat(
};
}
-/**
-Check if the message is an assistant message with completed tool calls.
-The message must have at least one tool invocation and all tool invocations
-must have a result.
- */
-function isAssistantMessageWithCompletedToolCalls(message: Message) {
- return (
- message.role === 'assistant' &&
- message.toolInvocations &&
- message.toolInvocations.length > 0 &&
- message.toolInvocations.every(toolInvocation => 'result' in toolInvocation)
- );
+// required for use of structured clone
+function recursiveToRaw<T>(inputValue: T): T {
+ if (Array.isArray(inputValue)) {
+ return [...inputValue.map(recursiveToRaw)] as T;
+ } else if (typeof inputValue === 'object' && inputValue !== null) {
+ const clone: any = {};
+ for (const [key, value] of Object.entries(inputValue)) {
+ clone[key] = recursiveToRaw(value);
+ }
+ return clone;
+ } else {
+ return inputValue;
+ }
}
diff --git a/packages/vue/src/use-chat.ui.test.tsx b/packages/vue/src/use-chat.ui.test.tsx
index 5c56ae91b98e..a7839b2d4e0b 100644
--- a/packages/vue/src/use-chat.ui.test.tsx
+++ b/packages/vue/src/use-chat.ui.test.tsx
@@ -185,6 +185,7 @@ describe('data protocol stream', () => {
createdAt: expect.any(String),
role: 'assistant',
content: 'Hello, world.',
+ parts: [{ text: 'Hello, world.', type: 'text' }],
},
options: {
finishReason: 'stop',
@@ -255,6 +256,7 @@ describe('text stream', () => {
createdAt: expect.any(String),
role: 'assistant',
content: 'Hello, world.',
+ parts: [{ text: 'Hello, world.', type: 'text' }],
},
options: {
finishReason: 'unknown',
@@ -319,7 +321,13 @@ describe('custom metadata', () => {
expect(await call(0).getRequestBodyJson()).toStrictEqual({
id: expect.any(String),
- messages: [{ content: 'custom metadata component', role: 'user' }],
+ messages: [
+ {
+ content: 'custom metadata component',
+ role: 'user',
+ parts: [{ text: 'custom metadata component', type: 'text' }],
+ },
+ ],
body1: 'value1',
body2: 'value2',
});
@@ -481,7 +489,13 @@ describe('reload', () => {
expect(await call(1).getRequestBodyJson()).toStrictEqual({
id: expect.any(String),
- messages: [{ content: 'hi', role: 'user' }],
+ messages: [
+ {
+ content: 'hi',
+ role: 'user',
+ parts: [{ text: 'hi', type: 'text' }],
+ },
+ ],
data: { 'test-data-key': 'test-data-value' },
'request-body-key': 'request-body-value',
});
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 01952592b736..8e2291e22c66 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -481,6 +481,9 @@ importers:
examples/next-openai:
dependencies:
+ '@ai-sdk/anthropic':
+ specifier: 1.1.6
+ version: link:../../packages/anthropic
'@ai-sdk/deepseek':
specifier: 0.1.8
version: link:../../packages/deepseek