diff --git a/assets/CleanShot_2025-12-03_at_09.07.34_2x-e559659f-ceb7-4b86-879d-a603788b0b56.png b/assets/CleanShot_2025-12-03_at_09.07.34_2x-e559659f-ceb7-4b86-879d-a603788b0b56.png
deleted file mode 100644
index e69de29b..00000000
diff --git a/docs/adapters/anthropic.md b/docs/adapters/anthropic.md
index 422f68bf..38783c70 100644
--- a/docs/adapters/anthropic.md
+++ b/docs/adapters/anthropic.md
@@ -132,7 +132,7 @@ const stream = chat({
### Thinking (Extended Thinking)
-Enable extended thinking with a token budget. This allows Claude to show its reasoning process, which is streamed as `thinking` chunks and displayed as `ThinkingPart` in messages:
+Enable extended thinking with a token budget. This allows Claude to show its reasoning process, which is streamed as `STEP_STARTED` and `STEP_FINISHED` events and displayed as `ThinkingPart` in messages:
```typescript
providerOptions: {
diff --git a/docs/adapters/openai.md b/docs/adapters/openai.md
index 1d1392b0..d2d4d9c9 100644
--- a/docs/adapters/openai.md
+++ b/docs/adapters/openai.md
@@ -126,7 +126,7 @@ const stream = chat({
### Reasoning
-Enable reasoning for models that support it (e.g., GPT-5). This allows the model to show its reasoning process, which is streamed as `thinking` chunks:
+Enable reasoning for models that support it (e.g., GPT-5). This allows the model to show its reasoning process, which is streamed as `STEP_STARTED` and `STEP_FINISHED` events:
```typescript
providerOptions: {
diff --git a/docs/api/ai.md b/docs/api/ai.md
index 6f9240f4..ee4ed9ad 100644
--- a/docs/api/ai.md
+++ b/docs/api/ai.md
@@ -247,33 +247,68 @@ interface ModelMessage {
### `StreamChunk`
+TanStack AI implements the [AG-UI Protocol](https://docs.ag-ui.com/introduction) for streaming. All events share a common base structure:
+
```typescript
-type StreamChunk =
- | ContentStreamChunk
- | ThinkingStreamChunk
- | ToolCallStreamChunk
- | ToolResultStreamChunk
- | DoneStreamChunk
- | ErrorStreamChunk;
-
-interface ThinkingStreamChunk {
- type: "thinking";
- id: string;
- model: string;
+interface BaseEvent {
+ type: EventType;
timestamp: number;
- delta?: string; // Incremental thinking token
+ model?: string;
+ rawEvent?: unknown;
+}
+
+type EventType =
+ | 'RUN_STARTED' // Run lifecycle begins
+ | 'RUN_FINISHED' // Run completed successfully
+ | 'RUN_ERROR' // Error occurred
+ | 'TEXT_MESSAGE_START' // Text message begins
+ | 'TEXT_MESSAGE_CONTENT' // Text content streaming
+ | 'TEXT_MESSAGE_END' // Text message completes
+ | 'TOOL_CALL_START' // Tool invocation begins
+ | 'TOOL_CALL_ARGS' // Tool arguments streaming
+ | 'TOOL_CALL_END' // Tool call completes (with result)
+ | 'STEP_STARTED' // Thinking/reasoning step begins
+ | 'STEP_FINISHED' // Thinking/reasoning step completes
+ | 'STATE_SNAPSHOT' // Full state synchronization
+ | 'STATE_DELTA' // Incremental state update
+ | 'CUSTOM'; // Custom extensibility events
+
+type StreamChunk =
+ | RunStartedEvent
+ | RunFinishedEvent
+ | RunErrorEvent
+ | TextMessageStartEvent
+ | TextMessageContentEvent
+ | TextMessageEndEvent
+ | ToolCallStartEvent
+ | ToolCallArgsEvent
+ | ToolCallEndEvent
+ | StepStartedEvent
+ | StepFinishedEvent
+ | StateSnapshotEvent
+ | StateDeltaEvent
+ | CustomEvent;
+
+// Example: Thinking/reasoning event
+interface StepFinishedEvent extends BaseEvent {
+ type: "STEP_FINISHED";
+ stepId: string;
+ delta?: string; // Incremental thinking token
content: string; // Accumulated thinking content
}
```
-Stream chunks represent different types of data in the stream:
+Stream events represent different types of data in the stream:
+
+- **`RUN_STARTED` / `RUN_FINISHED`** - Run lifecycle events
+- **`TEXT_MESSAGE_*`** - Text content being generated
+- **`STEP_STARTED` / `STEP_FINISHED`** - Model's reasoning process (thinking)
+- **`TOOL_CALL_*`** - Tool invocation and results
+- **`RUN_ERROR`** - Stream errors
+- **`STATE_*`** - Shared state updates
+- **`CUSTOM`** - Custom extensibility events
-- **Content chunks** - Text content being generated
-- **Thinking chunks** - Model's reasoning process (when supported by the model)
-- **Tool call chunks** - When the model calls a tool
-- **Tool result chunks** - Results from tool execution
-- **Done chunks** - Stream completion
-- **Error chunks** - Stream errors
+See [AG-UI Event Definitions](../protocol/chunk-definitions) for full details.
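+
+As a minimal sketch (assuming `stream` is the async iterable of `StreamChunk` values returned by `chat()`), events can be handled by narrowing on `type`:
+
+```typescript
+for await (const event of stream) {
+  switch (event.type) {
+    case 'TEXT_MESSAGE_CONTENT':
+      process.stdout.write(event.delta); // incremental text token
+      break;
+    case 'STEP_FINISHED':
+      console.log('Thinking:', event.content); // accumulated reasoning
+      break;
+    case 'RUN_ERROR':
+      console.error(event.error.message);
+      break;
+  }
+}
+```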
### `Tool`
diff --git a/docs/guides/agentic-cycle.md b/docs/guides/agentic-cycle.md
index 2e68c1e7..9e1356f5 100644
--- a/docs/guides/agentic-cycle.md
+++ b/docs/guides/agentic-cycle.md
@@ -46,21 +46,21 @@ sequenceDiagram
Note over LLM: Cycle 1: Call first tool
- LLM->>Server: tool_call: get_weather(SF)
+ LLM->>Server: TOOL_CALL_START/ARGS: get_weather(SF)
Server->>Tools: Execute get_weather
Tools-->>Server: {temp: 65, conditions: "sunny"}
- Server->>LLM: tool_result
+ Server->>LLM: TOOL_CALL_END with result
Note over LLM: Cycle 2: Call second tool
- LLM->>Server: tool_call: get_weather(LA)
+ LLM->>Server: TOOL_CALL_START/ARGS: get_weather(LA)
Server->>Tools: Execute get_weather
Tools-->>Server: {temp: 75, conditions: "clear"}
- Server->>LLM: tool_result
+ Server->>LLM: TOOL_CALL_END with result
Note over LLM: Cycle 3: Generate answer
- LLM-->>Server: content: "SF is 65°F..."
+ LLM-->>Server: TEXT_MESSAGE_CONTENT: "SF is 65°F..."
Server-->>Client: Stream response
Client->>User: Display answer
```
diff --git a/docs/guides/client-tools.md b/docs/guides/client-tools.md
index a7b19cb5..6d74eaa4 100644
--- a/docs/guides/client-tools.md
+++ b/docs/guides/client-tools.md
@@ -12,23 +12,24 @@ sequenceDiagram
participant Browser
participant ClientTool
- LLM Service->>Server: tool_call chunk {name: "updateUI", args: {...}}
+ LLM Service->>Server: TOOL_CALL_START event {toolName: "updateUI", toolCallId: "..."}
+ LLM Service->>Server: TOOL_CALL_ARGS event {delta: "{...}"}
Server->>Server: Check if tool has server execute
Note over Server: No execute function = client tool
- Server->>Browser: Forward tool-input-available chunk via SSE/HTTP
- Browser->>Browser: onToolCall callback triggered
+ Server->>Browser: Forward CUSTOM event (tool-input-available) via SSE/HTTP
+ Browser->>Browser: Client tool handler triggered
Browser->>ClientTool: execute(args)
ClientTool->>ClientTool: Update UI, localStorage, etc.
ClientTool-->>Browser: Return result
Browser->>Server: POST tool result
- Server->>LLM Service: Add tool_result to conversation
+ Server->>LLM Service: Add TOOL_CALL_END with result to conversation
Note over LLM Service: Model uses result to continue
- LLM Service-->>Server: Stream response
- Server-->>Browser: Forward chunks
+ LLM Service-->>Server: Stream TEXT_MESSAGE_CONTENT events
+ Server-->>Browser: Forward events
```
## When to Use Client Tools
@@ -41,15 +42,16 @@ sequenceDiagram
## How It Works
-1. **Tool Call from LLM**: LLM decides to call a client tool
-2. **Server Detection**: Server sees the tool has no `execute` function
-3. **Client Notification**: Server sends a `tool-input-available` chunk to the browser
-4. **Client Execution**: Browser's `onToolCall` callback is triggered with:
+1. **Tool Call from LLM**: LLM decides to call a client tool via `TOOL_CALL_START` event
+2. **Arguments Streaming**: Tool arguments stream via `TOOL_CALL_ARGS` events
+3. **Server Detection**: Server sees the tool has no `execute` function
+4. **Client Notification**: Server sends a `CUSTOM` event (name: `tool-input-available`) to the browser (see the example after this list)
+5. **Client Execution**: Browser's client tool handler is triggered with:
- `toolName`: Name of the tool to execute
- `input`: Parsed arguments
-5. **Result Return**: Client executes the tool and returns the result
-6. **Server Update**: Result is sent back to the server and added to the conversation
-7. **LLM Continuation**: LLM receives the result and continues the conversation
+6. **Result Return**: Client executes the tool and returns the result
+7. **Server Update**: Result is sent back as a `TOOL_CALL_END` event with the result
+8. **LLM Continuation**: LLM receives the result and continues the conversation
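+
+For reference, a sketch of the `CUSTOM` event the browser receives in step 4 (the `value` payload shown here is illustrative; see [AG-UI Event Definitions](../protocol/chunk-definitions) for the event shape):
+
+```json
+{
+  "type": "CUSTOM",
+  "name": "tool-input-available",
+  "timestamp": 1701234567890,
+  "value": {
+    "toolCallId": "call_abc123",
+    "toolName": "updateUI",
+    "input": { "theme": "dark" }
+  }
+}
+```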
## Defining Client Tools
@@ -199,12 +201,12 @@ function MessageComponent({ message }: { message: ChatMessages[number] }) {
## Automatic Execution
-Client tools are **automatically executed** when the model calls them. No manual `onToolCall` callback needed! The flow is:
+Client tools are **automatically executed** when the model calls them. No manual callback needed! The flow is:
-1. LLM calls a client tool
-2. Server sends `tool-input-available` chunk to browser
+1. LLM calls a client tool via `TOOL_CALL_START` and `TOOL_CALL_ARGS` events
+2. Server sends `CUSTOM` event (name: `tool-input-available`) to browser
3. Client automatically executes the matching tool implementation
-4. Result is sent back to server
+4. Result is sent back to server as a `TOOL_CALL_END` event
5. Conversation continues with the result
## Type Safety Benefits
diff --git a/docs/guides/server-tools.md b/docs/guides/server-tools.md
index 0a8e43ea..0bfcb411 100644
--- a/docs/guides/server-tools.md
+++ b/docs/guides/server-tools.md
@@ -12,24 +12,25 @@ sequenceDiagram
participant Tool
participant Database/API
- LLM Service->>Server: tool_call chunk {name: "getUserData", args: {...}}
+ LLM Service->>Server: TOOL_CALL_START event {toolName: "getUserData", toolCallId: "..."}
+ LLM Service->>Server: TOOL_CALL_ARGS event {delta: "{...}"}
Server->>Server: Parse tool call arguments
Server->>Tool: execute(parsedArgs)
Tool->>Database/API: Query/Fetch data
Database/API-->>Tool: Return data
Tool-->>Server: Return result
- Server->>Server: Create tool_result message
- Server->>LLM Service: Continue chat with tool_result in history
+ Server->>Server: Create TOOL_CALL_END with result
+ Server->>LLM Service: Continue chat with tool result in history
Note over LLM Service: Model uses result to generate response
- LLM Service-->>Server: Stream content chunks
+ LLM Service-->>Server: Stream TEXT_MESSAGE_CONTENT events
Server-->>Server: Stream to client
```
## How It Works
-1. **Tool Call Received**: Server receives a `tool_call` chunk from the LLM
+1. **Tool Call Received**: Server receives `TOOL_CALL_START` and `TOOL_CALL_ARGS` events from the LLM
2. **Argument Parsing**: The tool arguments (JSON string) are parsed and validated against the input schema
3. **Execution**: The tool's `execute` function is called with the parsed arguments
4. **Result Processing**: The result is:
diff --git a/docs/guides/streaming.md b/docs/guides/streaming.md
index da2a806b..8e64fa50 100644
--- a/docs/guides/streaming.md
+++ b/docs/guides/streaming.md
@@ -64,30 +64,34 @@ messages.forEach((message) => {
});
```
-## Stream Chunks
+## Stream Events
-Stream chunks contain different types of data:
+TanStack AI implements the [AG-UI Protocol](https://docs.ag-ui.com/introduction) for streaming. Stream events contain different types of data:
-- **Content chunks** - Text content being generated
-- **Thinking chunks** - Model's internal reasoning process (when supported)
-- **Tool call chunks** - When the model calls a tool
-- **Tool result chunks** - Results from tool execution
-- **Done chunks** - Stream completion
+- **`RUN_STARTED` / `RUN_FINISHED`** - Run lifecycle events
+- **`TEXT_MESSAGE_START` / `TEXT_MESSAGE_CONTENT` / `TEXT_MESSAGE_END`** - Text content streaming
+- **`STEP_STARTED` / `STEP_FINISHED`** - Model's internal reasoning process (thinking)
+- **`TOOL_CALL_START` / `TOOL_CALL_ARGS` / `TOOL_CALL_END`** - Tool invocation and results
+- **`STATE_SNAPSHOT` / `STATE_DELTA`** - Shared state updates
+- **`CUSTOM`** - Custom extensibility events
-### Thinking Chunks
+### Thinking Events
-Thinking chunks represent the model's reasoning process. They stream separately from the final response text:
+Thinking events (`STEP_STARTED` / `STEP_FINISHED`) represent the model's reasoning process. They stream separately from the final response text:
```typescript
for await (const chunk of stream) {
- if (chunk.type === "thinking") {
+ if (chunk.type === "STEP_STARTED") {
+ console.log("Thinking started:", chunk.stepId);
+ }
+ if (chunk.type === "STEP_FINISHED") {
console.log("Thinking:", chunk.content); // Accumulated thinking content
console.log("Delta:", chunk.delta); // Incremental thinking token
}
}
```
-Thinking chunks are automatically converted to `ThinkingPart` in `UIMessage` objects. They are UI-only and excluded from messages sent back to the model.
+Thinking events are automatically converted to `ThinkingPart` in `UIMessage` objects. They are UI-only and excluded from messages sent back to the model.
## Connection Adapters
diff --git a/docs/guides/tool-architecture.md b/docs/guides/tool-architecture.md
index f691f510..0367da36 100644
--- a/docs/guides/tool-architecture.md
+++ b/docs/guides/tool-architecture.md
@@ -36,9 +36,9 @@ sequenceDiagram
Note over LLM Service: Model analyzes tools
and decides to use one
- LLM Service-->>Server: Stream chunks: tool_call, content, done
- Server-->>Browser: Forward chunks via SSE/HTTP
- Browser->>Browser: Parse chunks & update UI
+ LLM Service-->>Server: Stream AG-UI events: TOOL_CALL_*, TEXT_MESSAGE_*, RUN_FINISHED
+ Server-->>Browser: Forward events via SSE/HTTP
+ Browser->>Browser: Parse events & update UI
Browser->>User: Show response
```
@@ -57,11 +57,11 @@ sequenceDiagram
- Analyzes the conversation and available tools
- Decides whether to call a tool based on the user's request
- Generates tool calls with arguments
-5. **Streaming Response**: The LLM streams back chunks:
- - `tool_call` chunks with tool name and arguments
- - `content` chunks with text responses
- - `done` chunk when complete
-6. **Client Updates**: The browser receives chunks and updates the UI in real-time
+5. **Streaming Response**: The LLM streams back AG-UI events:
+ - `TOOL_CALL_START` / `TOOL_CALL_ARGS` / `TOOL_CALL_END` events for tool invocations
+ - `TEXT_MESSAGE_CONTENT` events for text responses
+ - `RUN_FINISHED` event when complete
+6. **Client Updates**: The browser receives events and updates the UI in real-time
### Code Example
@@ -114,14 +114,14 @@ Tools progress through different states during their lifecycle. Understanding th
```mermaid
stateDiagram-v2
- [*] --> AwaitingInput: tool_call received
- AwaitingInput --> InputStreaming: partial arguments
+ [*] --> AwaitingInput: TOOL_CALL_START received
+ AwaitingInput --> InputStreaming: TOOL_CALL_ARGS (partial)
InputStreaming --> InputComplete: all arguments received
InputComplete --> ApprovalRequested: needsApproval=true
InputComplete --> Executing: needsApproval=false
ApprovalRequested --> Executing: user approves
ApprovalRequested --> Cancelled: user denies
- Executing --> OutputAvailable: success
+ Executing --> OutputAvailable: TOOL_CALL_END (success)
Executing --> OutputError: error
OutputAvailable --> [*]
OutputError --> [*]
@@ -210,15 +210,16 @@ sequenceDiagram
participant LLM
participant Tool
- LLM->>Server: tool_call: send_email
+ LLM->>Server: TOOL_CALL_START: send_email
+ LLM->>Server: TOOL_CALL_ARGS: {to, subject, body}
Server->>Server: Check needsApproval
- Server->>Client: approval-requested chunk
+ Server->>Client: CUSTOM event (approval-requested)
Client->>Client: Show approval UI
User->>Client: Clicks "Approve"
Client->>Server: POST approval response
Server->>Tool: execute(args)
Tool-->>Server: result
- Server->>LLM: tool_result
+ Server->>LLM: TOOL_CALL_END with result
LLM-->>Client: Generate response
```
@@ -325,13 +326,13 @@ The LLM can call multiple tools in parallel for efficiency:
```mermaid
graph TD
- A[LLM decides to call 3 tools] --> B[tool_call index: 0]
- A --> C[tool_call index: 1]
- A --> D[tool_call index: 2]
+ A[LLM decides to call 3 tools] --> B[TOOL_CALL_START index: 0]
+ A --> C[TOOL_CALL_START index: 1]
+ A --> D[TOOL_CALL_START index: 2]
B --> E[Execute in parallel]
C --> E
D --> E
- E --> F[Collect all results]
+ E --> F[Collect all TOOL_CALL_END results]
F --> G[Continue with results]
```
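+
+A sketch of that execution step (assuming `toolCalls` has been collected from the start/args events, and `tools` is a map from tool name to its `execute` function):
+
+```typescript
+// Execute all requested tools concurrently, keyed by toolCallId.
+const results = await Promise.all(
+  toolCalls.map(async (call) => ({
+    toolCallId: call.toolCallId,
+    result: await tools[call.toolName].execute(call.input),
+  })),
+);
+```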
diff --git a/docs/protocol/chunk-definitions.md b/docs/protocol/chunk-definitions.md
index 0f0b804b..2644e0a2 100644
--- a/docs/protocol/chunk-definitions.md
+++ b/docs/protocol/chunk-definitions.md
@@ -1,173 +1,299 @@
---
-title: Chunk Definitions
+title: AG-UI Event Definitions
id: chunk-definitions
---
-All streaming responses in TanStack AI consist of a series of **StreamChunks** - discrete JSON objects representing different events during the conversation. These chunks enable real-time updates for content generation, tool calls, errors, and completion signals.
-
-This document defines the data structures (chunks) that flow between the TanStack AI server and client during streaming chat operations.
+TanStack AI implements the [AG-UI (Agent-User Interaction) Protocol](https://docs.ag-ui.com/introduction), an open, lightweight, event-based protocol that standardizes how AI agents connect to user-facing applications.
+All streaming responses in TanStack AI consist of a series of **AG-UI Events** - discrete JSON objects representing different stages of the conversation lifecycle. These events enable real-time updates for content generation, tool calls, thinking/reasoning, and completion signals.
## Base Structure
-All chunks share a common base structure:
+All events share a common base structure:
```typescript
-interface BaseStreamChunk {
- type: StreamChunkType;
- id: string; // Unique identifier for the message/response
- model: string; // Model identifier (e.g., "gpt-4o", "claude-3-5-sonnet")
- timestamp: number; // Unix timestamp in milliseconds
+interface BaseEvent {
+ type: EventType;
+ timestamp: number; // Unix timestamp in milliseconds
+ model?: string; // Model identifier (TanStack AI addition)
+ rawEvent?: unknown; // Original provider event for debugging
}
```
-### Chunk Types
+### Event Types
+
+```typescript
+type EventType =
+ | 'RUN_STARTED' // Run lifecycle begins
+ | 'RUN_FINISHED' // Run completed successfully
+ | 'RUN_ERROR' // Error occurred
+ | 'TEXT_MESSAGE_START' // Text message begins
+ | 'TEXT_MESSAGE_CONTENT' // Text content streaming
+ | 'TEXT_MESSAGE_END' // Text message completes
+ | 'TOOL_CALL_START' // Tool invocation begins
+ | 'TOOL_CALL_ARGS' // Tool arguments streaming
+ | 'TOOL_CALL_END' // Tool call completes (with result)
+ | 'STEP_STARTED' // Thinking/reasoning step begins
+ | 'STEP_FINISHED' // Thinking/reasoning step completes
+ | 'STATE_SNAPSHOT' // Full state synchronization
+ | 'STATE_DELTA' // Incremental state update
+ | 'CUSTOM'; // Custom extensibility events
+```
+
+## Event Definitions
+
+### RUN_STARTED
+
+Emitted when a run begins. This is the first event in any streaming response.
```typescript
-type StreamChunkType =
- | 'content' // Text content being generated
- | 'thinking' // Model's reasoning process (when supported)
- | 'tool_call' // Model calling a tool/function
- | 'tool-input-available' // Tool inputs are ready for client execution
- | 'approval-requested' // Tool requires user approval
- | 'tool_result' // Result from tool execution
- | 'done' // Stream completion
- | 'error'; // Error occurred
+interface RunStartedEvent extends BaseEvent {
+ type: 'RUN_STARTED';
+ runId: string; // Unique identifier for this run
+ threadId?: string; // Optional thread/conversation ID
+}
+```
+
+**Example:**
+```json
+{
+ "type": "RUN_STARTED",
+ "runId": "run_abc123",
+ "model": "gpt-4o",
+ "timestamp": 1701234567890
+}
```
-## Chunk Definitions
+---
-### ContentStreamChunk
+### RUN_FINISHED
-Emitted when the model generates text content. Sent incrementally as tokens are generated.
+Emitted when a run completes successfully.
```typescript
-interface ContentStreamChunk extends BaseStreamChunk {
- type: 'content';
- delta: string; // The incremental content token (new text since last chunk)
- content: string; // Full accumulated content so far
- role?: 'assistant';
+interface RunFinishedEvent extends BaseEvent {
+ type: 'RUN_FINISHED';
+ runId: string;
+ finishReason: 'stop' | 'length' | 'content_filter' | 'tool_calls' | null;
+ usage?: {
+ promptTokens: number;
+ completionTokens: number;
+ totalTokens: number;
+ };
}
```
**Example:**
```json
{
- "type": "content",
- "id": "chatcmpl-abc123",
+ "type": "RUN_FINISHED",
+ "runId": "run_abc123",
+ "model": "gpt-4o",
+ "timestamp": 1701234567892,
+ "finishReason": "stop",
+ "usage": {
+ "promptTokens": 150,
+ "completionTokens": 75,
+ "totalTokens": 225
+ }
+}
+```
+
+**Finish Reasons:**
+- `stop` - Natural completion
+- `length` - Reached max tokens
+- `content_filter` - Stopped by content filtering
+- `tool_calls` - Stopped to execute tools
+- `null` - Unknown or not provided
+
+---
+
+### RUN_ERROR
+
+Emitted when an error occurs during a run.
+
+```typescript
+interface RunErrorEvent extends BaseEvent {
+ type: 'RUN_ERROR';
+ runId?: string;
+ error: {
+ message: string;
+ code?: string;
+ };
+}
+```
+
+**Example:**
+```json
+{
+ "type": "RUN_ERROR",
+ "runId": "run_abc123",
+ "model": "gpt-4o",
+ "timestamp": 1701234567893,
+ "error": {
+ "message": "Rate limit exceeded",
+ "code": "rate_limit_exceeded"
+ }
+}
+```
+
+---
+
+### TEXT_MESSAGE_START
+
+Emitted when a text message begins streaming.
+
+```typescript
+interface TextMessageStartEvent extends BaseEvent {
+ type: 'TEXT_MESSAGE_START';
+ messageId: string;
+ role: 'assistant';
+}
+```
+
+**Example:**
+```json
+{
+ "type": "TEXT_MESSAGE_START",
+ "messageId": "msg_xyz789",
"model": "gpt-4o",
"timestamp": 1701234567890,
- "delta": "Hello",
- "content": "Hello",
"role": "assistant"
}
```
-**Usage:**
-- Display `delta` for smooth streaming effect
-- Use `content` for the complete message so far
-- Multiple content chunks will be sent for a single response
-
---
-### ThinkingStreamChunk
+### TEXT_MESSAGE_CONTENT
-Emitted when the model exposes its reasoning process (e.g., Claude with extended thinking, o1 models).
+Emitted for each chunk of text content as it streams.
```typescript
-interface ThinkingStreamChunk extends BaseStreamChunk {
- type: 'thinking';
- delta?: string; // The incremental thinking token
- content: string; // Full accumulated thinking content so far
+interface TextMessageContentEvent extends BaseEvent {
+ type: 'TEXT_MESSAGE_CONTENT';
+ messageId: string;
+ delta: string; // The incremental content token
+ content?: string; // Full accumulated content so far (TanStack AI addition)
}
```
**Example:**
```json
{
- "type": "thinking",
- "id": "chatcmpl-abc123",
- "model": "claude-3-5-sonnet",
+ "type": "TEXT_MESSAGE_CONTENT",
+ "messageId": "msg_xyz789",
+ "model": "gpt-4o",
"timestamp": 1701234567890,
- "delta": "First, I need to",
- "content": "First, I need to"
+ "delta": "Hello",
+ "content": "Hello"
}
```
-**Usage:**
-- Display in a separate "thinking" UI element
-- Thinking is excluded from messages sent back to the model
-- Not all models support thinking chunks
+---
+
+### TEXT_MESSAGE_END
+
+Emitted when a text message completes.
+
+```typescript
+interface TextMessageEndEvent extends BaseEvent {
+ type: 'TEXT_MESSAGE_END';
+ messageId: string;
+}
+```
+
+**Example:**
+```json
+{
+ "type": "TEXT_MESSAGE_END",
+ "messageId": "msg_xyz789",
+ "model": "gpt-4o",
+ "timestamp": 1701234567891
+}
+```
---
-### ToolCallStreamChunk
+### TOOL_CALL_START
-Emitted when the model decides to call a tool/function.
+Emitted when a tool call begins.
```typescript
-interface ToolCallStreamChunk extends BaseStreamChunk {
- type: 'tool_call';
- toolCall: {
+interface ToolCallStartEvent extends BaseEvent {
+ type: 'TOOL_CALL_START';
+ toolCallId: string;
+ toolName: string;
+ index?: number; // Index for parallel tool calls
+ approval?: { // Present if tool requires approval
id: string;
- type: 'function';
- function: {
- name: string;
- arguments: string; // JSON string (may be partial/incremental)
- };
+ needsApproval: true;
};
- index: number; // Index of this tool call (for parallel calls)
}
```
**Example:**
```json
{
- "type": "tool_call",
- "id": "chatcmpl-abc123",
+ "type": "TOOL_CALL_START",
+ "toolCallId": "call_abc123",
+ "toolName": "get_weather",
"model": "gpt-4o",
"timestamp": 1701234567890,
- "toolCall": {
- "id": "call_abc123",
- "type": "function",
- "function": {
- "name": "get_weather",
- "arguments": "{\"location\":\"San Francisco\"}"
- }
- },
"index": 0
}
```
-**Usage:**
-- Multiple chunks may be sent for a single tool call (streaming arguments)
-- `arguments` may be incomplete until all chunks for this tool call are received
-- `index` allows multiple parallel tool calls
-
---
-### ToolInputAvailableStreamChunk
+### TOOL_CALL_ARGS
-Emitted when tool inputs are complete and ready for client-side execution.
+Emitted as tool call arguments stream.
```typescript
-interface ToolInputAvailableStreamChunk extends BaseStreamChunk {
- type: 'tool-input-available';
- toolCallId: string; // ID of the tool call
- toolName: string; // Name of the tool to execute
- input: any; // Parsed tool arguments (JSON object)
+interface ToolCallArgsEvent extends BaseEvent {
+ type: 'TOOL_CALL_ARGS';
+ toolCallId: string;
+ delta: string; // Incremental JSON arguments
+ args?: string; // Full accumulated arguments so far
}
```
**Example:**
```json
{
- "type": "tool-input-available",
- "id": "chatcmpl-abc123",
+ "type": "TOOL_CALL_ARGS",
+ "toolCallId": "call_abc123",
"model": "gpt-4o",
"timestamp": 1701234567890,
+ "delta": "{\"location\":",
+ "args": "{\"location\":"
+}
+```
+
+---
+
+### TOOL_CALL_END
+
+Emitted when a tool call completes. May include the result if the tool was executed server-side.
+
+```typescript
+interface ToolCallEndEvent extends BaseEvent {
+ type: 'TOOL_CALL_END';
+ toolCallId: string;
+ toolName: string;
+ input?: any; // Final parsed input arguments
+ result?: string; // Tool execution result (if executed)
+}
+```
+
+**Example (client-side tool):**
+```json
+{
+ "type": "TOOL_CALL_END",
"toolCallId": "call_abc123",
"toolName": "get_weather",
+ "model": "gpt-4o",
+ "timestamp": 1701234567890,
"input": {
"location": "San Francisco",
"unit": "fahrenheit"
@@ -175,215 +301,210 @@ interface ToolInputAvailableStreamChunk extends BaseStreamChunk {
}
```
-**Usage:**
-- Signals that the client should execute the tool
-- Only sent for tools without a server-side `execute` function
-- Client calls `onToolCall` callback with these parameters
+**Example (server-side tool with result):**
+```json
+{
+ "type": "TOOL_CALL_END",
+ "toolCallId": "call_abc123",
+ "toolName": "get_weather",
+ "model": "gpt-4o",
+ "timestamp": 1701234567891,
+ "input": { "location": "San Francisco" },
+ "result": "{\"temperature\":72,\"conditions\":\"sunny\"}"
+}
+```
---
-### ApprovalRequestedStreamChunk
+### STEP_STARTED
-Emitted when a tool requires user approval before execution.
+Emitted when a thinking/reasoning step begins (e.g., Claude's extended thinking, o1 models).
```typescript
-interface ApprovalRequestedStreamChunk extends BaseStreamChunk {
- type: 'approval-requested';
- toolCallId: string; // ID of the tool call
- toolName: string; // Name of the tool requiring approval
- input: any; // Tool arguments for review
- approval: {
- id: string; // Unique approval request ID
- needsApproval: true; // Always true
- };
+interface StepStartedEvent extends BaseEvent {
+ type: 'STEP_STARTED';
+ stepId: string;
+ stepType: 'thinking' | 'reasoning' | 'planning';
}
```
**Example:**
```json
{
- "type": "approval-requested",
- "id": "chatcmpl-abc123",
- "model": "gpt-4o",
- "timestamp": 1701234567890,
- "toolCallId": "call_abc123",
- "toolName": "send_email",
- "input": {
- "to": "user@example.com",
- "subject": "Hello",
- "body": "Test email"
- },
- "approval": {
- "id": "approval_xyz789",
- "needsApproval": true
- }
+ "type": "STEP_STARTED",
+ "stepId": "step_xyz123",
+ "stepType": "thinking",
+ "model": "claude-3-5-sonnet",
+ "timestamp": 1701234567890
}
```
-**Usage:**
-- Display approval UI to user
-- User responds with approval decision via `addToolApprovalResponse()`
-- Tool execution pauses until approval is granted or denied
-
---
-### ToolResultStreamChunk
+### STEP_FINISHED
-Emitted when a tool execution completes (either server-side or client-side).
+Emitted when thinking/reasoning content streams or completes.
```typescript
-interface ToolResultStreamChunk extends BaseStreamChunk {
- type: 'tool_result';
- toolCallId: string; // ID of the tool call that was executed
- content: string; // Result of the tool execution (JSON stringified)
+interface StepFinishedEvent extends BaseEvent {
+ type: 'STEP_FINISHED';
+ stepId: string;
+ delta?: string; // Incremental thinking token
+ content: string; // Full accumulated thinking content
}
```
**Example:**
```json
{
- "type": "tool_result",
- "id": "chatcmpl-abc123",
- "model": "gpt-4o",
- "timestamp": 1701234567891,
- "toolCallId": "call_abc123",
- "content": "{\"temperature\":72,\"conditions\":\"sunny\"}"
+ "type": "STEP_FINISHED",
+ "stepId": "step_xyz123",
+ "model": "claude-3-5-sonnet",
+ "timestamp": 1701234567890,
+ "delta": "Let me analyze",
+ "content": "Let me analyze"
}
```
-**Usage:**
-- Sent after tool execution completes
-- Model uses this result to continue the conversation
-- May trigger additional model responses
-
---
-### DoneStreamChunk
+### STATE_SNAPSHOT
-Emitted when the stream completes successfully.
+Emitted for full state synchronization (shared state between agent and app).
```typescript
-interface DoneStreamChunk extends BaseStreamChunk {
- type: 'done';
- finishReason: 'stop' | 'length' | 'content_filter' | 'tool_calls' | null;
- usage?: {
- promptTokens: number;
- completionTokens: number;
- totalTokens: number;
- };
+interface StateSnapshotEvent extends BaseEvent {
+ type: 'STATE_SNAPSHOT';
+ state: Record<string, unknown>;
}
```
**Example:**
```json
{
- "type": "done",
- "id": "chatcmpl-abc123",
- "model": "gpt-4o",
- "timestamp": 1701234567892,
- "finishReason": "stop",
- "usage": {
- "promptTokens": 150,
- "completionTokens": 75,
- "totalTokens": 225
+ "type": "STATE_SNAPSHOT",
+ "timestamp": 1701234567890,
+ "state": {
+ "currentStep": 3,
+ "progress": 0.75,
+ "context": { "user": "John" }
}
}
```
-**Finish Reasons:**
-- `stop` - Natural completion
-- `length` - Reached max tokens
-- `content_filter` - Stopped by content filtering
-- `tool_calls` - Stopped to execute tools
-- `null` - Unknown or not provided
+---
-**Usage:**
-- Marks the end of a successful stream
-- Clean up streaming state
-- Display token usage (if available)
+### STATE_DELTA
+
+Emitted for incremental state updates using JSON Patch-like operations.
+
+```typescript
+interface StateDeltaEvent extends BaseEvent {
+ type: 'STATE_DELTA';
+ delta: Array<{
+ op: 'add' | 'remove' | 'replace';
+ path: string;
+ value?: unknown;
+ }>;
+}
+```
+
+**Example:**
+```json
+{
+ "type": "STATE_DELTA",
+ "timestamp": 1701234567890,
+ "delta": [
+ { "op": "replace", "path": "/progress", "value": 0.80 },
+ { "op": "add", "path": "/results/0", "value": "item1" }
+ ]
+}
+```
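+
+A sketch of applying these operations on the client (assuming a plain-object state; a real JSON Patch library would also handle pointer escaping and array insertion semantics):
+
+```typescript
+function applyDelta(
+  state: Record<string, unknown>,
+  delta: StateDeltaEvent['delta'],
+): void {
+  for (const { op, path, value } of delta) {
+    const keys = path.split('/').slice(1); // "/progress" -> ["progress"]
+    const last = keys.pop()!;
+    let target: any = state;
+    for (const key of keys) target = target[key]; // walk to the parent object
+    if (op === 'remove') delete target[last];
+    else target[last] = value; // 'add' and 'replace' both assign here
+  }
+}
+```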
---
-### ErrorStreamChunk
+### CUSTOM
-Emitted when an error occurs during streaming.
+Custom event for extensibility. Used for features not covered by standard AG-UI events.
```typescript
-interface ErrorStreamChunk extends BaseStreamChunk {
- type: 'error';
- error: {
- message: string; // Human-readable error message
- code?: string; // Optional error code
- };
+interface CustomEvent extends BaseEvent {
+ type: 'CUSTOM';
+ name: string;
+ value: unknown;
}
```
-**Example:**
+**Example (approval request):**
```json
{
- "type": "error",
- "id": "chatcmpl-abc123",
+ "type": "CUSTOM",
+ "name": "approval-requested",
"model": "gpt-4o",
- "timestamp": 1701234567893,
- "error": {
- "message": "Rate limit exceeded",
- "code": "rate_limit_exceeded"
+ "timestamp": 1701234567890,
+ "value": {
+ "toolCallId": "call_abc123",
+ "toolName": "send_email",
+ "input": { "to": "user@example.com", "subject": "Hello" },
+ "approval": { "id": "approval_xyz789" }
}
}
```
-**Common Error Codes:**
-- `rate_limit_exceeded` - API rate limit hit
-- `invalid_request` - Malformed request
-- `authentication_error` - API key issues
-- `timeout` - Request timed out
-- `server_error` - Internal server error
-
-**Usage:**
-- Display error to user
-- Stream ends after error chunk
-- Retry logic should be implemented client-side
-
---
-## Chunk Ordering and Relationships
+## Event Ordering and Relationships
### Typical Flow
-1. **Content Generation:**
+1. **Simple Content Generation:**
```
- ContentStreamChunk (delta: "Hello")
- ContentStreamChunk (delta: " world")
- ContentStreamChunk (delta: "!")
- DoneStreamChunk (finishReason: "stop")
+ RUN_STARTED
+ TEXT_MESSAGE_START
+ TEXT_MESSAGE_CONTENT (delta: "Hello")
+ TEXT_MESSAGE_CONTENT (delta: " world")
+ TEXT_MESSAGE_CONTENT (delta: "!")
+ TEXT_MESSAGE_END
+ RUN_FINISHED (finishReason: "stop")
```
2. **With Thinking:**
```
- ThinkingStreamChunk (delta: "I need to...")
- ThinkingStreamChunk (delta: " check the weather")
- ContentStreamChunk (delta: "Let me check")
- DoneStreamChunk (finishReason: "stop")
+ RUN_STARTED
+ STEP_STARTED (stepType: "thinking")
+ STEP_FINISHED (delta: "I need to...")
+ STEP_FINISHED (delta: " check the weather")
+ TEXT_MESSAGE_START
+ TEXT_MESSAGE_CONTENT (delta: "Let me check")
+ TEXT_MESSAGE_END
+ RUN_FINISHED (finishReason: "stop")
```
3. **Tool Usage:**
```
- ToolCallStreamChunk (name: "get_weather")
- ToolResultStreamChunk (content: "{...}")
- ContentStreamChunk (delta: "The weather is...")
- DoneStreamChunk (finishReason: "stop")
+ RUN_STARTED
+ TOOL_CALL_START (toolName: "get_weather")
+ TOOL_CALL_ARGS (delta: "{\"location\":\"SF\"}")
+ TOOL_CALL_END (input: {"location":"SF"}, result: "{...}")
+ TEXT_MESSAGE_START
+ TEXT_MESSAGE_CONTENT (delta: "The weather is...")
+ TEXT_MESSAGE_END
+ RUN_FINISHED (finishReason: "stop")
```
4. **Client Tool with Approval:**
```
- ToolCallStreamChunk (name: "send_email")
- ApprovalRequestedStreamChunk (toolName: "send_email")
+ RUN_STARTED
+ TOOL_CALL_START (toolName: "send_email", approval: {...})
+ TOOL_CALL_ARGS (delta: "{...}")
+ CUSTOM (name: "approval-requested")
[User approves]
- ToolInputAvailableStreamChunk (toolName: "send_email")
+ TOOL_CALL_END (input: {...})
[Client executes]
- ToolResultStreamChunk (content: "{\"sent\":true}")
- ContentStreamChunk (delta: "Email sent successfully")
- DoneStreamChunk (finishReason: "stop")
+ TEXT_MESSAGE_START
+ TEXT_MESSAGE_CONTENT (delta: "Email sent successfully")
+ TEXT_MESSAGE_END
+ RUN_FINISHED (finishReason: "stop")
```
### Multiple Tool Calls
@@ -391,45 +512,56 @@ interface ErrorStreamChunk extends BaseStreamChunk {
When the model calls multiple tools in parallel:
```
-ToolCallStreamChunk (index: 0, name: "get_weather")
-ToolCallStreamChunk (index: 1, name: "get_time")
-ToolResultStreamChunk (toolCallId: "call_1")
-ToolResultStreamChunk (toolCallId: "call_2")
-ContentStreamChunk (delta: "Based on the data...")
-DoneStreamChunk (finishReason: "stop")
+RUN_STARTED
+TOOL_CALL_START (index: 0, toolName: "get_weather")
+TOOL_CALL_START (index: 1, toolName: "get_time")
+TOOL_CALL_ARGS (toolCallId: "call_1", ...)
+TOOL_CALL_ARGS (toolCallId: "call_2", ...)
+TOOL_CALL_END (toolCallId: "call_1", ...)
+TOOL_CALL_END (toolCallId: "call_2", ...)
+TEXT_MESSAGE_START
+TEXT_MESSAGE_CONTENT (delta: "Based on the data...")
+TEXT_MESSAGE_END
+RUN_FINISHED (finishReason: "stop")
```
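+
+A sketch of demultiplexing parallel tool calls by `toolCallId`, accumulating argument deltas until the matching `TOOL_CALL_END` arrives:
+
+```typescript
+const pendingArgs = new Map<string, string>();
+
+function handleToolEvent(event: StreamChunk) {
+  switch (event.type) {
+    case 'TOOL_CALL_START':
+      pendingArgs.set(event.toolCallId, '');
+      break;
+    case 'TOOL_CALL_ARGS':
+      pendingArgs.set(
+        event.toolCallId,
+        (pendingArgs.get(event.toolCallId) ?? '') + event.delta,
+      );
+      break;
+    case 'TOOL_CALL_END': {
+      // Prefer the final parsed input when present; otherwise parse the accumulated JSON.
+      const input = event.input ?? JSON.parse(pendingArgs.get(event.toolCallId) ?? '{}');
+      pendingArgs.delete(event.toolCallId);
+      console.log(event.toolName, input, event.result);
+      break;
+    }
+  }
+}
+```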
---
## TypeScript Union Type
-All chunks are represented as a discriminated union:
+All events are represented as a discriminated union:
```typescript
type StreamChunk =
- | ContentStreamChunk
- | ThinkingStreamChunk
- | ToolCallStreamChunk
- | ToolInputAvailableStreamChunk
- | ApprovalRequestedStreamChunk
- | ToolResultStreamChunk
- | DoneStreamChunk
- | ErrorStreamChunk;
+ | RunStartedEvent
+ | RunFinishedEvent
+ | RunErrorEvent
+ | TextMessageStartEvent
+ | TextMessageContentEvent
+ | TextMessageEndEvent
+ | ToolCallStartEvent
+ | ToolCallArgsEvent
+ | ToolCallEndEvent
+ | StepStartedEvent
+ | StepFinishedEvent
+ | StateSnapshotEvent
+ | StateDeltaEvent
+ | CustomEvent;
```
This enables type-safe handling in TypeScript:
```typescript
-function handleChunk(chunk: StreamChunk) {
- switch (chunk.type) {
- case 'content':
- console.log(chunk.delta); // TypeScript knows this is ContentStreamChunk
+function handleEvent(event: StreamChunk) {
+ switch (event.type) {
+ case 'TEXT_MESSAGE_CONTENT':
+ console.log(event.delta); // TypeScript knows this is TextMessageContentEvent
break;
- case 'thinking':
- console.log(chunk.content); // TypeScript knows this is ThinkingStreamChunk
+ case 'TOOL_CALL_START':
+ console.log(event.toolName); // TypeScript knows structure
break;
- case 'tool_call':
- console.log(chunk.toolCall.function.name); // TypeScript knows structure
+ case 'RUN_FINISHED':
+ console.log(event.usage); // TypeScript knows this is RunFinishedEvent
break;
// ... other cases
}
@@ -438,8 +570,24 @@ function handleChunk(chunk: StreamChunk) {
---
+## AG-UI Compatibility
+
+TanStack AI's streaming protocol is fully compatible with the AG-UI specification. This means:
+
+1. **Ecosystem Interoperability**: TanStack AI can work with AG-UI-compatible tools and frameworks like LangGraph, CrewAI, and CopilotKit.
+
+2. **Standard Event Types**: All 14 AG-UI event types are supported.
+
+3. **TanStack AI Additions**: We add useful fields like `model` on every event and `content` accumulation on text events for convenience.
+
+4. **Extensibility**: The `CUSTOM` event type allows for any additional functionality not covered by the standard events.
+
+For more information about AG-UI, visit the [official documentation](https://docs.ag-ui.com/introduction).
+
+---
+
## See Also
-- [SSE Protocol](./sse-protocol) - How chunks are transmitted via Server-Sent Events
-- [HTTP Stream Protocol](./http-stream-protocol) - How chunks are transmitted via HTTP streaming
+- [SSE Protocol](./sse-protocol) - How events are transmitted via Server-Sent Events
+- [HTTP Stream Protocol](./http-stream-protocol) - How events are transmitted via HTTP streaming
- [Connection Adapters Guide](../guides/connection-adapters) - Client implementation
diff --git a/docs/protocol/http-stream-protocol.md b/docs/protocol/http-stream-protocol.md
index 8330cd42..928533e7 100644
--- a/docs/protocol/http-stream-protocol.md
+++ b/docs/protocol/http-stream-protocol.md
@@ -80,25 +80,29 @@ Each StreamChunk is transmitted as a single line of JSON followed by a newline (
### Examples
-#### Content Chunks
+#### Content Streaming (AG-UI Events)
```json
-{"type":"content","id":"chatcmpl-abc123","model":"gpt-4o","timestamp":1701234567890,"delta":"Hello","content":"Hello","role":"assistant"}
-{"type":"content","id":"chatcmpl-abc123","model":"gpt-4o","timestamp":1701234567891,"delta":" world","content":"Hello world","role":"assistant"}
-{"type":"content","id":"chatcmpl-abc123","model":"gpt-4o","timestamp":1701234567892,"delta":"!","content":"Hello world!","role":"assistant"}
+{"type":"RUN_STARTED","runId":"run_abc123","model":"gpt-4o","timestamp":1701234567890}
+{"type":"TEXT_MESSAGE_START","messageId":"msg_xyz789","model":"gpt-4o","timestamp":1701234567890,"role":"assistant"}
+{"type":"TEXT_MESSAGE_CONTENT","messageId":"msg_xyz789","model":"gpt-4o","timestamp":1701234567890,"delta":"Hello","content":"Hello"}
+{"type":"TEXT_MESSAGE_CONTENT","messageId":"msg_xyz789","model":"gpt-4o","timestamp":1701234567891,"delta":" world","content":"Hello world"}
+{"type":"TEXT_MESSAGE_CONTENT","messageId":"msg_xyz789","model":"gpt-4o","timestamp":1701234567892,"delta":"!","content":"Hello world!"}
+{"type":"TEXT_MESSAGE_END","messageId":"msg_xyz789","model":"gpt-4o","timestamp":1701234567892}
```
#### Tool Call
```json
-{"type":"tool_call","id":"chatcmpl-abc123","model":"gpt-4o","timestamp":1701234567893,"toolCall":{"id":"call_xyz","type":"function","function":{"name":"get_weather","arguments":"{\"location\":\"SF\"}"}},"index":0}
-{"type":"tool_result","id":"chatcmpl-abc123","model":"gpt-4o","timestamp":1701234567894,"toolCallId":"call_xyz","content":"{\"temperature\":72,\"conditions\":\"sunny\"}"}
+{"type":"TOOL_CALL_START","toolCallId":"call_xyz","toolName":"get_weather","model":"gpt-4o","timestamp":1701234567893,"index":0}
+{"type":"TOOL_CALL_ARGS","toolCallId":"call_xyz","model":"gpt-4o","timestamp":1701234567893,"delta":"{\"location\":\"SF\"}","args":"{\"location\":\"SF\"}"}
+{"type":"TOOL_CALL_END","toolCallId":"call_xyz","toolName":"get_weather","model":"gpt-4o","timestamp":1701234567894,"input":{"location":"SF"},"result":"{\"temperature\":72,\"conditions\":\"sunny\"}"}
```
#### Stream Completion
```json
-{"type":"done","id":"chatcmpl-abc123","model":"gpt-4o","timestamp":1701234567895,"finishReason":"stop","usage":{"promptTokens":10,"completionTokens":15,"totalTokens":25}}
+{"type":"RUN_FINISHED","runId":"run_abc123","model":"gpt-4o","timestamp":1701234567895,"finishReason":"stop","usage":{"promptTokens":10,"completionTokens":15,"totalTokens":25}}
```
---
@@ -127,14 +131,16 @@ Transfer-Encoding: chunked
-### 3. Server Streams Chunks
+### 3. Server Streams Events
-The server sends newline-delimited JSON:
+The server sends newline-delimited AG-UI events:
```json
-{"type":"content","id":"msg_1","model":"gpt-4o","timestamp":1701234567890,"delta":"The","content":"The"}
-{"type":"content","id":"msg_1","model":"gpt-4o","timestamp":1701234567891,"delta":" weather","content":"The weather"}
-{"type":"content","id":"msg_1","model":"gpt-4o","timestamp":1701234567892,"delta":" is","content":"The weather is"}
-{"type":"content","id":"msg_1","model":"gpt-4o","timestamp":1701234567893,"delta":" sunny","content":"The weather is sunny"}
-{"type":"done","id":"msg_1","model":"gpt-4o","timestamp":1701234567894,"finishReason":"stop"}
+{"type":"RUN_STARTED","runId":"run_1","model":"gpt-4o","timestamp":1701234567890}
+{"type":"TEXT_MESSAGE_START","messageId":"msg_1","model":"gpt-4o","timestamp":1701234567890,"role":"assistant"}
+{"type":"TEXT_MESSAGE_CONTENT","messageId":"msg_1","model":"gpt-4o","timestamp":1701234567890,"delta":"The","content":"The"}
+{"type":"TEXT_MESSAGE_CONTENT","messageId":"msg_1","model":"gpt-4o","timestamp":1701234567891,"delta":" weather","content":"The weather"}
+{"type":"TEXT_MESSAGE_CONTENT","messageId":"msg_1","model":"gpt-4o","timestamp":1701234567892,"delta":" is sunny","content":"The weather is sunny"}
+{"type":"TEXT_MESSAGE_END","messageId":"msg_1","model":"gpt-4o","timestamp":1701234567893}
+{"type":"RUN_FINISHED","runId":"run_1","model":"gpt-4o","timestamp":1701234567894,"finishReason":"stop"}
```
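+
+On the client, a minimal sketch of consuming this NDJSON stream with `fetch` (the `messages` payload is illustrative), buffering partial lines until a newline completes each event:
+
+```typescript
+const response = await fetch('/api/chat', {
+  method: 'POST',
+  headers: { 'Content-Type': 'application/json' },
+  body: JSON.stringify({ messages }),
+});
+
+const reader = response.body!.getReader();
+const decoder = new TextDecoder();
+let buffer = '';
+
+while (true) {
+  const { done, value } = await reader.read();
+  if (done) break;
+  buffer += decoder.decode(value, { stream: true });
+  const lines = buffer.split('\n');
+  buffer = lines.pop()!; // keep the trailing partial line
+  for (const line of lines) {
+    if (line.trim()) console.log(JSON.parse(line)); // one AG-UI event per line
+  }
+}
+```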
### 4. Stream Completion
@@ -147,10 +153,10 @@ Server closes the connection. No special marker needed (unlike SSE's `[DONE]`).
### Server-Side Errors
-If an error occurs during generation, send an error chunk:
+If an error occurs during generation, send a RUN_ERROR event:
```json
-{"type":"error","id":"msg_1","model":"gpt-4o","timestamp":1701234567895,"error":{"message":"Rate limit exceeded","code":"rate_limit_exceeded"}}
+{"type":"RUN_ERROR","runId":"run_1","model":"gpt-4o","timestamp":1701234567895,"error":{"message":"Rate limit exceeded","code":"rate_limit_exceeded"}}
```
Then close the connection.
@@ -196,14 +202,14 @@ export async function POST(request: Request) {
}
controller.close();
} catch (error: any) {
- const errorChunk = {
- type: 'error',
+ const errorEvent = {
+ type: 'RUN_ERROR',
error: {
message: error.message || 'Unknown error',
code: error.code,
},
};
- controller.enqueue(encoder.encode(JSON.stringify(errorChunk) + '\n'));
+ controller.enqueue(encoder.encode(JSON.stringify(errorEvent) + '\n'));
controller.close();
}
},
@@ -246,11 +252,11 @@ app.post('/api/chat', async (req, res) => {
res.write(JSON.stringify(chunk) + '\n');
}
} catch (error: any) {
- const errorChunk = {
- type: 'error',
+ const errorEvent = {
+ type: 'RUN_ERROR',
error: { message: error.message },
};
- res.write(JSON.stringify(errorChunk) + '\n');
+ res.write(JSON.stringify(errorEvent) + '\n');
} finally {
res.end();
}
@@ -362,9 +368,12 @@ The `-N` flag disables buffering to see real-time output.
**Example Output:**
```json
-{"type":"content","id":"msg_1","model":"gpt-4o","timestamp":1701234567890,"delta":"Hello","content":"Hello"}
-{"type":"content","id":"msg_1","model":"gpt-4o","timestamp":1701234567891,"delta":" there","content":"Hello there"}
-{"type":"done","id":"msg_1","model":"gpt-4o","timestamp":1701234567892,"finishReason":"stop"}
+{"type":"RUN_STARTED","runId":"run_1","model":"gpt-4o","timestamp":1701234567889}
+{"type":"TEXT_MESSAGE_START","messageId":"msg_1","model":"gpt-4o","timestamp":1701234567890,"role":"assistant"}
+{"type":"TEXT_MESSAGE_CONTENT","messageId":"msg_1","model":"gpt-4o","timestamp":1701234567890,"delta":"Hello","content":"Hello"}
+{"type":"TEXT_MESSAGE_CONTENT","messageId":"msg_1","model":"gpt-4o","timestamp":1701234567891,"delta":" there","content":"Hello there"}
+{"type":"TEXT_MESSAGE_END","messageId":"msg_1","model":"gpt-4o","timestamp":1701234567892}
+{"type":"RUN_FINISHED","runId":"run_1","model":"gpt-4o","timestamp":1701234567892,"finishReason":"stop"}
```
### Validating NDJSON
diff --git a/docs/protocol/sse-protocol.md b/docs/protocol/sse-protocol.md
index 1f9f3d9e..7d865140 100644
--- a/docs/protocol/sse-protocol.md
+++ b/docs/protocol/sse-protocol.md
@@ -69,24 +69,48 @@ data: {JSON_ENCODED_CHUNK}\n\n
3. **Ends with double newline `\n\n`**
4. **No event names or IDs** (not required for our use case)
-### Examples
+### Examples (AG-UI Events)
-#### Content Chunk
+#### Run Started
```
-data: {"type":"content","id":"chatcmpl-abc123","model":"gpt-4o","timestamp":1701234567890,"delta":"Hello","content":"Hello","role":"assistant"}\n\n
+data: {"type":"RUN_STARTED","runId":"run_abc123","model":"gpt-4o","timestamp":1701234567890}\n\n
```
-#### Tool Call Chunk
+#### Text Message Start
```
-data: {"type":"tool_call","id":"chatcmpl-abc123","model":"gpt-4o","timestamp":1701234567891,"toolCall":{"id":"call_xyz","type":"function","function":{"name":"get_weather","arguments":"{\"location\":\"SF\"}"}},"index":0}\n\n
+data: {"type":"TEXT_MESSAGE_START","messageId":"msg_xyz789","model":"gpt-4o","timestamp":1701234567890,"role":"assistant"}\n\n
```
-#### Done Chunk
+#### Text Message Content
```
-data: {"type":"done","id":"chatcmpl-abc123","model":"gpt-4o","timestamp":1701234567892,"finishReason":"stop","usage":{"promptTokens":10,"completionTokens":5,"totalTokens":15}}\n\n
+data: {"type":"TEXT_MESSAGE_CONTENT","messageId":"msg_xyz789","model":"gpt-4o","timestamp":1701234567890,"delta":"Hello","content":"Hello"}\n\n
+```
+
+#### Tool Call Start
+
+```
+data: {"type":"TOOL_CALL_START","toolCallId":"call_xyz","toolName":"get_weather","model":"gpt-4o","timestamp":1701234567891,"index":0}\n\n
+```
+
+#### Tool Call Args
+
+```
+data: {"type":"TOOL_CALL_ARGS","toolCallId":"call_xyz","model":"gpt-4o","timestamp":1701234567891,"delta":"{\"location\":\"SF\"}","args":"{\"location\":\"SF\"}"}\n\n
+```
+
+#### Tool Call End
+
+```
+data: {"type":"TOOL_CALL_END","toolCallId":"call_xyz","toolName":"get_weather","model":"gpt-4o","timestamp":1701234567892,"input":{"location":"SF"}}\n\n
+```
+
+#### Run Finished
+
+```
+data: {"type":"RUN_FINISHED","runId":"run_abc123","model":"gpt-4o","timestamp":1701234567892,"finishReason":"stop","usage":{"promptTokens":10,"completionTokens":5,"totalTokens":15}}\n\n
```
---
@@ -115,16 +139,18 @@ Cache-Control: no-cache
Connection: keep-alive
```
-### 3. Server Streams Chunks
+### 3. Server Streams Events
-The server sends multiple `data:` events as chunks are generated:
+The server sends multiple AG-UI events as content is generated:
```
-data: {"type":"content","id":"msg_1","model":"gpt-4o","timestamp":1701234567890,"delta":"The","content":"The"}\n\n
-data: {"type":"content","id":"msg_1","model":"gpt-4o","timestamp":1701234567891,"delta":" weather","content":"The weather"}\n\n
-data: {"type":"content","id":"msg_1","model":"gpt-4o","timestamp":1701234567892,"delta":" is","content":"The weather is"}\n\n
-data: {"type":"content","id":"msg_1","model":"gpt-4o","timestamp":1701234567893,"delta":" sunny","content":"The weather is sunny"}\n\n
-data: {"type":"done","id":"msg_1","model":"gpt-4o","timestamp":1701234567894,"finishReason":"stop"}\n\n
+data: {"type":"RUN_STARTED","runId":"run_1","model":"gpt-4o","timestamp":1701234567889}\n\n
+data: {"type":"TEXT_MESSAGE_START","messageId":"msg_1","model":"gpt-4o","timestamp":1701234567890,"role":"assistant"}\n\n
+data: {"type":"TEXT_MESSAGE_CONTENT","messageId":"msg_1","model":"gpt-4o","timestamp":1701234567890,"delta":"The","content":"The"}\n\n
+data: {"type":"TEXT_MESSAGE_CONTENT","messageId":"msg_1","model":"gpt-4o","timestamp":1701234567891,"delta":" weather","content":"The weather"}\n\n
+data: {"type":"TEXT_MESSAGE_CONTENT","messageId":"msg_1","model":"gpt-4o","timestamp":1701234567892,"delta":" is sunny","content":"The weather is sunny"}\n\n
+data: {"type":"TEXT_MESSAGE_END","messageId":"msg_1","model":"gpt-4o","timestamp":1701234567893}\n\n
+data: {"type":"RUN_FINISHED","runId":"run_1","model":"gpt-4o","timestamp":1701234567894,"finishReason":"stop"}\n\n
```
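+
+On the client, a minimal sketch of parsing these frames from a streamed `fetch` response (assuming `response` is the `fetch` result from step 1; frames are separated by a blank line, and `[DONE]` marks completion as described below):
+
+```typescript
+const reader = response.body!.getReader();
+const decoder = new TextDecoder();
+let buffer = '';
+
+while (true) {
+  const { done, value } = await reader.read();
+  if (done) break;
+  buffer += decoder.decode(value, { stream: true });
+  const frames = buffer.split('\n\n');
+  buffer = frames.pop()!; // keep the trailing partial frame
+  for (const frame of frames) {
+    const data = frame.replace(/^data: /, '');
+    if (!data || data === '[DONE]') continue; // skip empties and the completion marker
+    console.log(JSON.parse(data)); // one AG-UI event per frame
+  }
+}
+```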
### 4. Stream Completion
@@ -143,10 +169,10 @@ Then closes the connection.
### Server-Side Errors
-If an error occurs during generation, send an error chunk:
+If an error occurs during generation, send a RUN_ERROR event:
```
-data: {"type":"error","id":"msg_1","model":"gpt-4o","timestamp":1701234567895,"error":{"message":"Rate limit exceeded","code":"rate_limit_exceeded"}}\n\n
+data: {"type":"RUN_ERROR","runId":"run_1","model":"gpt-4o","timestamp":1701234567895,"error":{"message":"Rate limit exceeded","code":"rate_limit_exceeded"}}\n\n
```
Then close the connection.
@@ -231,11 +257,11 @@ export async function POST(request: Request) {
controller.enqueue(encoder.encode('data: [DONE]\n\n'));
controller.close();
} catch (error) {
- const errorChunk = {
- type: 'error',
+ const errorEvent = {
+ type: 'RUN_ERROR',
error: { message: error.message }
};
- controller.enqueue(encoder.encode(`data: ${JSON.stringify(errorChunk)}\n\n`));
+ controller.enqueue(encoder.encode(`data: ${JSON.stringify(errorEvent)}\n\n`));
controller.close();
}
}
@@ -305,11 +331,17 @@ The `-N` flag disables buffering to see real-time output.
**Example Output:**
```
-data: {"type":"content","id":"msg_1","model":"gpt-4o","timestamp":1701234567890,"delta":"Hello","content":"Hello"}
+data: {"type":"RUN_STARTED","runId":"run_1","model":"gpt-4o","timestamp":1701234567889}
+
+data: {"type":"TEXT_MESSAGE_START","messageId":"msg_1","model":"gpt-4o","timestamp":1701234567890,"role":"assistant"}
+
+data: {"type":"TEXT_MESSAGE_CONTENT","messageId":"msg_1","model":"gpt-4o","timestamp":1701234567890,"delta":"Hello","content":"Hello"}
+
+data: {"type":"TEXT_MESSAGE_CONTENT","messageId":"msg_1","model":"gpt-4o","timestamp":1701234567891,"delta":" there","content":"Hello there"}
-data: {"type":"content","id":"msg_1","model":"gpt-4o","timestamp":1701234567891,"delta":" there","content":"Hello there"}
+data: {"type":"TEXT_MESSAGE_END","messageId":"msg_1","model":"gpt-4o","timestamp":1701234567891}
-data: {"type":"done","id":"msg_1","model":"gpt-4o","timestamp":1701234567892,"finishReason":"stop"}
+data: {"type":"RUN_FINISHED","runId":"run_1","model":"gpt-4o","timestamp":1701234567892,"finishReason":"stop"}
data: [DONE]
```
diff --git a/docs/reference/classes/BaseAdapter.md b/docs/reference/classes/BaseAdapter.md
index 1127e644..e31a4893 100644
--- a/docs/reference/classes/BaseAdapter.md
+++ b/docs/reference/classes/BaseAdapter.md
@@ -60,7 +60,7 @@ Generic parameters:
new BaseAdapter(config): BaseAdapter;
```
-Defined in: [base-adapter.ts:70](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L70)
+Defined in: [base-adapter.ts:66](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L66)
#### Parameters
@@ -80,7 +80,7 @@ Defined in: [base-adapter.ts:70](https://github.com/TanStack/ai/blob/main/packag
optional _chatProviderOptions: TChatProviderOptions;
```
-Defined in: [base-adapter.ts:61](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L61)
+Defined in: [base-adapter.ts:57](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L57)
#### Implementation of
@@ -94,7 +94,7 @@ Defined in: [base-adapter.ts:61](https://github.com/TanStack/ai/blob/main/packag
optional _embeddingProviderOptions: TEmbeddingProviderOptions;
```
-Defined in: [base-adapter.ts:62](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L62)
+Defined in: [base-adapter.ts:58](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L58)
#### Implementation of
@@ -108,7 +108,7 @@ Defined in: [base-adapter.ts:62](https://github.com/TanStack/ai/blob/main/packag
optional _messageMetadataByModality: TMessageMetadataByModality;
```
-Defined in: [base-adapter.ts:68](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L68)
+Defined in: [base-adapter.ts:64](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L64)
Type-only map from modality type to adapter-specific metadata types.
Used to provide type-safe autocomplete for metadata on content parts.
@@ -125,7 +125,7 @@ Used to provide type-safe autocomplete for metadata on content parts.
optional _modelInputModalitiesByName: TModelInputModalitiesByName;
```
-Defined in: [base-adapter.ts:66](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L66)
+Defined in: [base-adapter.ts:62](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L62)
Type-only map from model name to its supported input modalities.
Used by the core AI types to narrow ContentPart types based on the selected model.
@@ -143,7 +143,7 @@ Must be provided by all adapters.
_modelProviderOptionsByName: TModelProviderOptionsByName;
```
-Defined in: [base-adapter.ts:64](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L64)
+Defined in: [base-adapter.ts:60](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L60)
Type-only map from model name to its specific provider options.
Used by the core AI types to narrow providerOptions based on the selected model.
@@ -161,7 +161,7 @@ Must be provided by all adapters.
optional _providerOptions: TChatProviderOptions;
```
-Defined in: [base-adapter.ts:60](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L60)
+Defined in: [base-adapter.ts:56](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L56)
#### Implementation of
@@ -175,7 +175,7 @@ Defined in: [base-adapter.ts:60](https://github.com/TanStack/ai/blob/main/packag
protected config: AIAdapterConfig;
```
-Defined in: [base-adapter.ts:57](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L57)
+Defined in: [base-adapter.ts:53](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L53)
***
@@ -185,7 +185,7 @@ Defined in: [base-adapter.ts:57](https://github.com/TanStack/ai/blob/main/packag
optional embeddingModels: TEmbeddingModels;
```
-Defined in: [base-adapter.ts:56](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L56)
+Defined in: [base-adapter.ts:52](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L52)
Models that support embeddings
@@ -201,7 +201,7 @@ Models that support embeddings
abstract models: TChatModels;
```
-Defined in: [base-adapter.ts:55](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L55)
+Defined in: [base-adapter.ts:51](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L51)
Models that support chat/text completion
@@ -217,7 +217,7 @@ Models that support chat/text completion
abstract name: string;
```
-Defined in: [base-adapter.ts:54](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L54)
+Defined in: [base-adapter.ts:50](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L50)
#### Implementation of
@@ -231,7 +231,7 @@ Defined in: [base-adapter.ts:54](https://github.com/TanStack/ai/blob/main/packag
abstract chatStream(options): AsyncIterable;
```
-Defined in: [base-adapter.ts:74](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L74)
+Defined in: [base-adapter.ts:70](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L70)
#### Parameters
@@ -255,7 +255,7 @@ Defined in: [base-adapter.ts:74](https://github.com/TanStack/ai/blob/main/packag
abstract createEmbeddings(options): Promise;
```
-Defined in: [base-adapter.ts:79](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L79)
+Defined in: [base-adapter.ts:75](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L75)
#### Parameters
@@ -279,7 +279,7 @@ Defined in: [base-adapter.ts:79](https://github.com/TanStack/ai/blob/main/packag
protected generateId(): string;
```
-Defined in: [base-adapter.ts:81](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L81)
+Defined in: [base-adapter.ts:77](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L77)
#### Returns
@@ -293,7 +293,7 @@ Defined in: [base-adapter.ts:81](https://github.com/TanStack/ai/blob/main/packag
abstract summarize(options): Promise;
```
-Defined in: [base-adapter.ts:76](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L76)
+Defined in: [base-adapter.ts:72](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/base-adapter.ts#L72)
#### Parameters
diff --git a/docs/reference/classes/StreamProcessor.md b/docs/reference/classes/StreamProcessor.md
index 8acdf904..d7d3e141 100644
--- a/docs/reference/classes/StreamProcessor.md
+++ b/docs/reference/classes/StreamProcessor.md
@@ -165,7 +165,7 @@ Clear all messages
finalizeStream(): void;
```
-Defined in: [stream/processor.ts:951](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L951)
+Defined in: [stream/processor.ts:1033](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L1033)
Finalize the stream - complete all pending operations
@@ -197,7 +197,7 @@ Get current messages
getRecording(): ChunkRecording | null;
```
-Defined in: [stream/processor.ts:1037](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L1037)
+Defined in: [stream/processor.ts:1533](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L1533)
Get the current recording
@@ -213,7 +213,7 @@ Get the current recording
getState(): ProcessorState;
```
-Defined in: [stream/processor.ts:1010](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L1010)
+Defined in: [stream/processor.ts:1092](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L1092)
Get current processor state (legacy)
@@ -295,7 +295,7 @@ Remove messages after a certain index (for reload/retry)
reset(): void;
```
-Defined in: [stream/processor.ts:1060](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L1060)
+Defined in: [stream/processor.ts:1556](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L1556)
Full reset (including messages)
@@ -350,7 +350,7 @@ Returns the message ID
startRecording(): void;
```
-Defined in: [stream/processor.ts:1024](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L1024)
+Defined in: [stream/processor.ts:1520](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L1520)
Start recording chunks
@@ -388,7 +388,7 @@ Get the conversation as ModelMessages (for sending to LLM)
static replay(recording, options?): Promise;
```
-Defined in: [stream/processor.ts:1069](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L1069)
+Defined in: [stream/processor.ts:1565](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L1565)
Replay a recording through the processor
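+
+A minimal round-trip sketch using only the methods documented on this page (the live processing in between is assumed):
+
+```ts
+processor.startRecording()
+// ...process a live stream...
+processor.finalizeStream()
+
+const recording = processor.getRecording()
+if (recording) {
+  // Re-run the captured chunks through the processor
+  await StreamProcessor.replay(recording)
+}
+```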
diff --git a/docs/reference/classes/ToolCallManager.md b/docs/reference/classes/ToolCallManager.md
index 7b0c7894..234aa302 100644
--- a/docs/reference/classes/ToolCallManager.md
+++ b/docs/reference/classes/ToolCallManager.md
@@ -5,7 +5,7 @@ title: ToolCallManager
# Class: ToolCallManager
-Defined in: [tools/tool-calls.ts:51](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-calls.ts#L51)
+Defined in: [tools/tool-calls.ts:53](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-calls.ts#L53)
Manages tool call accumulation and execution for the chat() method's automatic tool execution loop.
@@ -47,7 +47,7 @@ if (manager.hasToolCalls()) {
new ToolCallManager(tools): ToolCallManager;
```
-Defined in: [tools/tool-calls.ts:55](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-calls.ts#L55)
+Defined in: [tools/tool-calls.ts:57](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-calls.ts#L57)
#### Parameters
@@ -61,15 +61,37 @@ readonly [`Tool`](../interfaces/Tool.md)\<`ZodType`\<`unknown`, `unknown`, `$Zod
## Methods
-### addToolCallChunk()
+### addToolCallArgsEvent()
+
+```ts
+addToolCallArgsEvent(chunk): void;
+```
+
+Defined in: [tools/tool-calls.ts:79](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-calls.ts#L79)
+
+Add a TOOL_CALL_ARGS event to accumulate arguments
+
+#### Parameters
+
+##### chunk
+
+[`ToolCallArgsEvent`](../interfaces/ToolCallArgsEvent.md)
+
+#### Returns
+
+`void`
+
+***
+
+### ~~addToolCallChunk()~~
```ts
addToolCallChunk(chunk): void;
```
-Defined in: [tools/tool-calls.ts:63](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-calls.ts#L63)
+Defined in: [tools/tool-calls.ts:106](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-calls.ts#L106)
-Add a tool call chunk to the accumulator
+Add a tool call chunk to the accumulator (legacy format)
Handles streaming tool calls by accumulating arguments
#### Parameters
@@ -118,6 +140,32 @@ Handles streaming tool calls by accumulating arguments
`void`
+#### Deprecated
+
+Use addToolCallStartEvent and addToolCallArgsEvent instead
+
+***
+
+### addToolCallStartEvent()
+
+```ts
+addToolCallStartEvent(chunk): void;
+```
+
+Defined in: [tools/tool-calls.ts:64](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-calls.ts#L64)
+
+Add a TOOL_CALL_START event to begin tracking a tool call
+
+#### Parameters
+
+##### chunk
+
+[`ToolCallStartEvent`](../interfaces/ToolCallStartEvent.md)
+
+#### Returns
+
+`void`
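+
+A minimal sketch wiring the two event-based methods together while consuming a stream (the `stream` variable is assumed; the discriminated `type` field does the narrowing):
+
+```ts
+for await (const event of stream) {
+  if (event.type === 'TOOL_CALL_START') {
+    manager.addToolCallStartEvent(event) // begin tracking this tool call
+  } else if (event.type === 'TOOL_CALL_ARGS') {
+    manager.addToolCallArgsEvent(event) // accumulate streamed argument deltas
+  }
+}
+```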
+
***
### clear()
@@ -126,7 +174,7 @@ Handles streaming tool calls by accumulating arguments
clear(): void;
```
-Defined in: [tools/tool-calls.ts:208](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-calls.ts#L208)
+Defined in: [tools/tool-calls.ts:251](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-calls.ts#L251)
Clear the tool calls map for the next iteration
@@ -136,29 +184,55 @@ Clear the tool calls map for the next iteration
***
+### completeToolCall()
+
+```ts
+completeToolCall(toolCallId, input?): void;
+```
+
+Defined in: [tools/tool-calls.ts:92](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-calls.ts#L92)
+
+Complete a tool call with its final input
+
+#### Parameters
+
+##### toolCallId
+
+`string`
+
+##### input?
+
+`any`
+
+#### Returns
+
+`void`
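+
+For example (a sketch; the accumulated argument string is an assumption):
+
+```ts
+// Finalize the call once its arguments have fully streamed
+manager.completeToolCall(toolCallId, JSON.parse(accumulatedArgs))
+```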
+
+***
+
### executeTools()
```ts
executeTools(doneChunk): AsyncGenerator;
```
-Defined in: [tools/tool-calls.ts:121](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-calls.ts#L121)
+Defined in: [tools/tool-calls.ts:164](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-calls.ts#L164)
Execute all tool calls and return tool result messages
-Also yields tool_result chunks for streaming
+Also yields TOOL_CALL_END events for streaming
#### Parameters
##### doneChunk
-[`DoneStreamChunk`](../interfaces/DoneStreamChunk.md)
+[`RunFinishedEvent`](../interfaces/RunFinishedEvent.md)
#### Returns
-`AsyncGenerator`\<[`ToolResultStreamChunk`](../interfaces/ToolResultStreamChunk.md), [`ModelMessage`](../interfaces/ModelMessage.md)\<
+`AsyncGenerator`\<[`ToolCallEndEvent`](../interfaces/ToolCallEndEvent.md), [`ModelMessage`](../interfaces/ModelMessage.md)\<
\| `string`
\| [`ContentPart`](../type-aliases/ContentPart.md)\<`unknown`, `unknown`, `unknown`, `unknown`, `unknown`\>[]
\| `null`\>[], `void`\>
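+
+Because the generator yields events and returns the messages, drive it with `next()` when you need the return value. A minimal sketch (the `runFinished` event is assumed to come from the stream):
+
+```ts
+const gen = manager.executeTools(runFinished)
+let step = await gen.next()
+while (!step.done) {
+  // step.value is a TOOL_CALL_END event; forward it to the client
+  step = await gen.next()
+}
+const toolMessages = step.value // ModelMessage[] to append to the conversation
+```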
@@ -171,7 +245,7 @@ Also yields tool_result chunks for streaming
getToolCalls(): ToolCall[];
```
-Defined in: [tools/tool-calls.ts:111](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-calls.ts#L111)
+Defined in: [tools/tool-calls.ts:154](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-calls.ts#L154)
Get all complete tool calls (filtered for valid ID and name)
@@ -187,7 +261,7 @@ Get all complete tool calls (filtered for valid ID and name)
hasToolCalls(): boolean;
```
-Defined in: [tools/tool-calls.ts:104](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-calls.ts#L104)
+Defined in: [tools/tool-calls.ts:147](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/tools/tool-calls.ts#L147)
Check if there are any complete tool calls to execute
diff --git a/docs/reference/functions/chat.md b/docs/reference/functions/chat.md
index 16934d76..5a37f3f8 100644
--- a/docs/reference/functions/chat.md
+++ b/docs/reference/functions/chat.md
@@ -9,7 +9,7 @@ title: chat
function chat(options): AsyncIterable;
```
-Defined in: [core/chat.ts:741](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/core/chat.ts#L741)
+Defined in: [core/chat.ts:880](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/core/chat.ts#L880)
Standalone chat streaming function with type inference from adapter
Returns an async iterable of StreamChunks for streaming responses
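+
+A minimal usage sketch (the adapter instance, model name, and handled event are assumptions):
+
+```ts
+const stream = chat({
+  adapter,
+  model: 'some-model',
+  messages: [{ role: 'user', content: 'Hello!' }],
+})
+
+for await (const event of stream) {
+  if (event.type === 'TEXT_MESSAGE_CONTENT') {
+    process.stdout.write(event.delta) // incremental text delta
+  }
+}
+```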
diff --git a/docs/reference/functions/createReplayStream.md b/docs/reference/functions/createReplayStream.md
index 39ec2a49..6df2e4e5 100644
--- a/docs/reference/functions/createReplayStream.md
+++ b/docs/reference/functions/createReplayStream.md
@@ -9,7 +9,7 @@ title: createReplayStream
function createReplayStream(recording): AsyncIterable;
```
-Defined in: [stream/processor.ts:1081](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L1081)
+Defined in: [stream/processor.ts:1577](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/stream/processor.ts#L1577)
Create an async iterable from a recording
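+
+For example, a recording captured via `StreamProcessor.startRecording()` can be turned back into a consumable stream (sketch):
+
+```ts
+for await (const event of createReplayStream(recording)) {
+  console.log(event.type, event.timestamp)
+}
+```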
diff --git a/docs/reference/functions/toStreamResponse.md b/docs/reference/functions/toStreamResponse.md
index 0753057c..3d920408 100644
--- a/docs/reference/functions/toStreamResponse.md
+++ b/docs/reference/functions/toStreamResponse.md
@@ -9,7 +9,7 @@ title: toStreamResponse
function toStreamResponse(stream, init?): Response;
```
-Defined in: [utilities/stream-to-response.ts:102](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/utilities/stream-to-response.ts#L102)
+Defined in: [utilities/stream-to-response.ts:103](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/utilities/stream-to-response.ts#L103)
Create a streaming HTTP response from a StreamChunk async iterable
Includes proper headers for Server-Sent Events
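+
+A minimal route-handler sketch (the framework shape and chat options are assumptions):
+
+```ts
+export async function POST(request: Request): Promise<Response> {
+  const { messages } = await request.json()
+  const stream = chat({ /* adapter, model, */ messages })
+  return toStreamResponse(stream)
+}
+```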
diff --git a/docs/reference/index.md b/docs/reference/index.md
index a3d506c0..b6b4e4d4 100644
--- a/docs/reference/index.md
+++ b/docs/reference/index.md
@@ -22,22 +22,23 @@ title: "@tanstack/ai"
- [AgentLoopState](interfaces/AgentLoopState.md)
- [AIAdapter](interfaces/AIAdapter.md)
- [AIAdapterConfig](interfaces/AIAdapterConfig.md)
-- [ApprovalRequestedStreamChunk](interfaces/ApprovalRequestedStreamChunk.md)
+- [~~ApprovalRequestedStreamChunk~~](interfaces/ApprovalRequestedStreamChunk.md)
- [AudioPart](interfaces/AudioPart.md)
-- [BaseStreamChunk](interfaces/BaseStreamChunk.md)
+- [BaseEvent](interfaces/BaseEvent.md)
- [ChatCompletionChunk](interfaces/ChatCompletionChunk.md)
- [ChatOptions](interfaces/ChatOptions.md)
- [ChunkRecording](interfaces/ChunkRecording.md)
- [ChunkStrategy](interfaces/ChunkStrategy.md)
- [ClientTool](interfaces/ClientTool.md)
- [ContentPartSource](interfaces/ContentPartSource.md)
-- [ContentStreamChunk](interfaces/ContentStreamChunk.md)
+- [~~ContentStreamChunk~~](interfaces/ContentStreamChunk.md)
+- [CustomEvent](interfaces/CustomEvent.md)
- [DefaultMessageMetadataByModality](interfaces/DefaultMessageMetadataByModality.md)
- [DocumentPart](interfaces/DocumentPart.md)
-- [DoneStreamChunk](interfaces/DoneStreamChunk.md)
+- [~~DoneStreamChunk~~](interfaces/DoneStreamChunk.md)
- [EmbeddingOptions](interfaces/EmbeddingOptions.md)
- [EmbeddingResult](interfaces/EmbeddingResult.md)
-- [ErrorStreamChunk](interfaces/ErrorStreamChunk.md)
+- [~~ErrorStreamChunk~~](interfaces/ErrorStreamChunk.md)
- [ImagePart](interfaces/ImagePart.md)
- [InternalToolCallState](interfaces/InternalToolCallState.md)
- [JSONParser](interfaces/JSONParser.md)
@@ -46,26 +47,39 @@ title: "@tanstack/ai"
- [ProcessorResult](interfaces/ProcessorResult.md)
- [ProcessorState](interfaces/ProcessorState.md)
- [ResponseFormat](interfaces/ResponseFormat.md)
+- [RunErrorEvent](interfaces/RunErrorEvent.md)
+- [RunFinishedEvent](interfaces/RunFinishedEvent.md)
+- [RunStartedEvent](interfaces/RunStartedEvent.md)
- [ServerTool](interfaces/ServerTool.md)
+- [StateDeltaEvent](interfaces/StateDeltaEvent.md)
+- [StateSnapshotEvent](interfaces/StateSnapshotEvent.md)
+- [StepFinishedEvent](interfaces/StepFinishedEvent.md)
+- [StepStartedEvent](interfaces/StepStartedEvent.md)
- [StreamProcessorEvents](interfaces/StreamProcessorEvents.md)
- [StreamProcessorHandlers](interfaces/StreamProcessorHandlers.md)
- [StreamProcessorOptions](interfaces/StreamProcessorOptions.md)
- [SummarizationOptions](interfaces/SummarizationOptions.md)
- [SummarizationResult](interfaces/SummarizationResult.md)
+- [TextMessageContentEvent](interfaces/TextMessageContentEvent.md)
+- [TextMessageEndEvent](interfaces/TextMessageEndEvent.md)
+- [TextMessageStartEvent](interfaces/TextMessageStartEvent.md)
- [TextPart](interfaces/TextPart.md)
- [ThinkingPart](interfaces/ThinkingPart.md)
-- [ThinkingStreamChunk](interfaces/ThinkingStreamChunk.md)
+- [~~ThinkingStreamChunk~~](interfaces/ThinkingStreamChunk.md)
- [Tool](interfaces/Tool.md)
- [ToolCall](interfaces/ToolCall.md)
+- [ToolCallArgsEvent](interfaces/ToolCallArgsEvent.md)
+- [ToolCallEndEvent](interfaces/ToolCallEndEvent.md)
- [ToolCallPart](interfaces/ToolCallPart.md)
-- [ToolCallStreamChunk](interfaces/ToolCallStreamChunk.md)
+- [ToolCallStartEvent](interfaces/ToolCallStartEvent.md)
+- [~~ToolCallStreamChunk~~](interfaces/ToolCallStreamChunk.md)
- [ToolConfig](interfaces/ToolConfig.md)
- [ToolDefinition](interfaces/ToolDefinition.md)
- [ToolDefinitionConfig](interfaces/ToolDefinitionConfig.md)
- [ToolDefinitionInstance](interfaces/ToolDefinitionInstance.md)
-- [ToolInputAvailableStreamChunk](interfaces/ToolInputAvailableStreamChunk.md)
+- [~~ToolInputAvailableStreamChunk~~](interfaces/ToolInputAvailableStreamChunk.md)
- [ToolResultPart](interfaces/ToolResultPart.md)
-- [ToolResultStreamChunk](interfaces/ToolResultStreamChunk.md)
+- [~~ToolResultStreamChunk~~](interfaces/ToolResultStreamChunk.md)
- [UIMessage](interfaces/UIMessage.md)
- [VideoPart](interfaces/VideoPart.md)
@@ -79,6 +93,7 @@ title: "@tanstack/ai"
- [ConstrainedModelMessage](type-aliases/ConstrainedModelMessage.md)
- [ContentPart](type-aliases/ContentPart.md)
- [ContentPartForModalities](type-aliases/ContentPartForModalities.md)
+- [EventType](type-aliases/EventType.md)
- [ExtractModalitiesForModel](type-aliases/ExtractModalitiesForModel.md)
- [ExtractModelsFromAdapter](type-aliases/ExtractModelsFromAdapter.md)
- [InferSchemaType](type-aliases/InferSchemaType.md)
diff --git a/docs/reference/interfaces/AIAdapter.md b/docs/reference/interfaces/AIAdapter.md
index 2ad47311..9794f317 100644
--- a/docs/reference/interfaces/AIAdapter.md
+++ b/docs/reference/interfaces/AIAdapter.md
@@ -5,7 +5,7 @@ title: AIAdapter
# Interface: AIAdapter\
-Defined in: [types.ts:756](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L756)
+Defined in: [types.ts:1018](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1018)
AI adapter interface with support for endpoint-specific models and provider options.
@@ -56,7 +56,7 @@ Generic parameters:
optional _chatProviderOptions: TChatProviderOptions;
```
-Defined in: [types.ts:783](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L783)
+Defined in: [types.ts:1043](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1043)
***
@@ -66,7 +66,7 @@ Defined in: [types.ts:783](https://github.com/TanStack/ai/blob/main/packages/typ
optional _embeddingProviderOptions: TEmbeddingProviderOptions;
```
-Defined in: [types.ts:784](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L784)
+Defined in: [types.ts:1044](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1044)
***
@@ -76,7 +76,7 @@ Defined in: [types.ts:784](https://github.com/TanStack/ai/blob/main/packages/typ
optional _messageMetadataByModality: TMessageMetadataByModality;
```
-Defined in: [types.ts:801](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L801)
+Defined in: [types.ts:1061](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1061)
Type-only map from modality type to adapter-specific metadata types.
Used to provide type-safe autocomplete for metadata on content parts.
@@ -89,7 +89,7 @@ Used to provide type-safe autocomplete for metadata on content parts.
optional _modelInputModalitiesByName: TModelInputModalitiesByName;
```
-Defined in: [types.ts:796](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L796)
+Defined in: [types.ts:1056](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1056)
Type-only map from model name to its supported input modalities.
Used by the core AI types to narrow ContentPart types based on the selected model.
@@ -103,7 +103,7 @@ Must be provided by all adapters.
_modelProviderOptionsByName: TModelProviderOptionsByName;
```
-Defined in: [types.ts:790](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L790)
+Defined in: [types.ts:1050](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1050)
Type-only map from model name to its specific provider options.
Used by the core AI types to narrow providerOptions based on the selected model.
@@ -117,7 +117,7 @@ Must be provided by all adapters.
optional _providerOptions: TChatProviderOptions;
```
-Defined in: [types.ts:782](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L782)
+Defined in: [types.ts:1042](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1042)
***
@@ -127,7 +127,7 @@ Defined in: [types.ts:782](https://github.com/TanStack/ai/blob/main/packages/typ
chatStream: (options) => AsyncIterable;
```
-Defined in: [types.ts:804](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L804)
+Defined in: [types.ts:1064](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1064)
#### Parameters
@@ -147,7 +147,7 @@ Defined in: [types.ts:804](https://github.com/TanStack/ai/blob/main/packages/typ
createEmbeddings: (options) => Promise;
```
-Defined in: [types.ts:812](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L812)
+Defined in: [types.ts:1072](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1072)
#### Parameters
@@ -167,7 +167,7 @@ Defined in: [types.ts:812](https://github.com/TanStack/ai/blob/main/packages/typ
optional embeddingModels: TEmbeddingModels;
```
-Defined in: [types.ts:779](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L779)
+Defined in: [types.ts:1039](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1039)
Models that support embeddings
@@ -179,7 +179,7 @@ Models that support embeddings
models: TChatModels;
```
-Defined in: [types.ts:776](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L776)
+Defined in: [types.ts:1036](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1036)
Models that support chat/text completion
@@ -191,7 +191,7 @@ Models that support chat/text completion
name: string;
```
-Defined in: [types.ts:774](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L774)
+Defined in: [types.ts:1034](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1034)
***
@@ -201,7 +201,7 @@ Defined in: [types.ts:774](https://github.com/TanStack/ai/blob/main/packages/typ
summarize: (options) => Promise;
```
-Defined in: [types.ts:809](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L809)
+Defined in: [types.ts:1069](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1069)
#### Parameters
diff --git a/docs/reference/interfaces/AIAdapterConfig.md b/docs/reference/interfaces/AIAdapterConfig.md
index 76abb781..97e4eac9 100644
--- a/docs/reference/interfaces/AIAdapterConfig.md
+++ b/docs/reference/interfaces/AIAdapterConfig.md
@@ -5,7 +5,7 @@ title: AIAdapterConfig
# Interface: AIAdapterConfig
-Defined in: [types.ts:815](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L815)
+Defined in: [types.ts:1075](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1075)
## Properties
@@ -15,7 +15,7 @@ Defined in: [types.ts:815](https://github.com/TanStack/ai/blob/main/packages/typ
optional apiKey: string;
```
-Defined in: [types.ts:816](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L816)
+Defined in: [types.ts:1076](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1076)
***
@@ -25,7 +25,7 @@ Defined in: [types.ts:816](https://github.com/TanStack/ai/blob/main/packages/typ
optional baseUrl: string;
```
-Defined in: [types.ts:817](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L817)
+Defined in: [types.ts:1077](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1077)
***
@@ -35,7 +35,7 @@ Defined in: [types.ts:817](https://github.com/TanStack/ai/blob/main/packages/typ
optional headers: Record;
```
-Defined in: [types.ts:820](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L820)
+Defined in: [types.ts:1080](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1080)
***
@@ -45,7 +45,7 @@ Defined in: [types.ts:820](https://github.com/TanStack/ai/blob/main/packages/typ
optional maxRetries: number;
```
-Defined in: [types.ts:819](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L819)
+Defined in: [types.ts:1079](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1079)
***
@@ -55,4 +55,4 @@ Defined in: [types.ts:819](https://github.com/TanStack/ai/blob/main/packages/typ
optional timeout: number;
```
-Defined in: [types.ts:818](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L818)
+Defined in: [types.ts:1078](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1078)
diff --git a/docs/reference/interfaces/ApprovalRequestedStreamChunk.md b/docs/reference/interfaces/ApprovalRequestedStreamChunk.md
index ab57b5dd..be85be3c 100644
--- a/docs/reference/interfaces/ApprovalRequestedStreamChunk.md
+++ b/docs/reference/interfaces/ApprovalRequestedStreamChunk.md
@@ -3,31 +3,31 @@ id: ApprovalRequestedStreamChunk
title: ApprovalRequestedStreamChunk
---
-# Interface: ApprovalRequestedStreamChunk
+# ~~Interface: ApprovalRequestedStreamChunk~~
-Defined in: [types.ts:645](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L645)
+Defined in: [types.ts:917](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L917)
-## Extends
+## Deprecated
-- [`BaseStreamChunk`](BaseStreamChunk.md)
+Use CustomEvent with name='approval-requested' instead
## Properties
-### approval
+### ~~approval?~~
```ts
-approval: object;
+optional approval: object;
```
-Defined in: [types.ts:650](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L650)
+Defined in: [types.ts:925](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L925)
-#### id
+#### ~~id~~
```ts
id: string;
```
-#### needsApproval
+#### ~~needsApproval~~
```ts
needsApproval: true;
@@ -35,86 +35,70 @@ needsApproval: true;
***
-### id
+### ~~id~~
```ts
id: string;
```
-Defined in: [types.ts:596](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L596)
-
-#### Inherited from
-
-[`BaseStreamChunk`](BaseStreamChunk.md).[`id`](BaseStreamChunk.md#id)
+Defined in: [types.ts:919](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L919)
***
-### input
+### ~~input~~
```ts
-input: any;
+input: Record;
```
-Defined in: [types.ts:649](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L649)
+Defined in: [types.ts:924](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L924)
***
-### model
+### ~~model~~
```ts
model: string;
```
-Defined in: [types.ts:597](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L597)
-
-#### Inherited from
-
-[`BaseStreamChunk`](BaseStreamChunk.md).[`model`](BaseStreamChunk.md#model)
+Defined in: [types.ts:920](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L920)
***
-### timestamp
+### ~~timestamp~~
```ts
timestamp: number;
```
-Defined in: [types.ts:598](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L598)
-
-#### Inherited from
-
-[`BaseStreamChunk`](BaseStreamChunk.md).[`timestamp`](BaseStreamChunk.md#timestamp)
+Defined in: [types.ts:921](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L921)
***
-### toolCallId
+### ~~toolCallId~~
```ts
toolCallId: string;
```
-Defined in: [types.ts:647](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L647)
+Defined in: [types.ts:922](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L922)
***
-### toolName
+### ~~toolName~~
```ts
toolName: string;
```
-Defined in: [types.ts:648](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L648)
+Defined in: [types.ts:923](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L923)
***
-### type
+### ~~type~~
```ts
type: "approval-requested";
```
-Defined in: [types.ts:646](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L646)
-
-#### Overrides
-
-[`BaseStreamChunk`](BaseStreamChunk.md).[`type`](BaseStreamChunk.md#type)
+Defined in: [types.ts:918](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L918)
diff --git a/docs/reference/interfaces/BaseEvent.md b/docs/reference/interfaces/BaseEvent.md
new file mode 100644
index 00000000..587c093b
--- /dev/null
+++ b/docs/reference/interfaces/BaseEvent.md
@@ -0,0 +1,72 @@
+---
+id: BaseEvent
+title: BaseEvent
+---
+
+# Interface: BaseEvent
+
+Defined in: [types.ts:625](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L625)
+
+Base structure for all AG-UI events.
+Extends AG-UI spec with TanStack AI additions (model field).
+
+## Extended by
+
+- [`RunStartedEvent`](RunStartedEvent.md)
+- [`RunFinishedEvent`](RunFinishedEvent.md)
+- [`RunErrorEvent`](RunErrorEvent.md)
+- [`TextMessageStartEvent`](TextMessageStartEvent.md)
+- [`TextMessageContentEvent`](TextMessageContentEvent.md)
+- [`TextMessageEndEvent`](TextMessageEndEvent.md)
+- [`ToolCallStartEvent`](ToolCallStartEvent.md)
+- [`ToolCallArgsEvent`](ToolCallArgsEvent.md)
+- [`ToolCallEndEvent`](ToolCallEndEvent.md)
+- [`StepStartedEvent`](StepStartedEvent.md)
+- [`StepFinishedEvent`](StepFinishedEvent.md)
+- [`StateSnapshotEvent`](StateSnapshotEvent.md)
+- [`StateDeltaEvent`](StateDeltaEvent.md)
+- [`CustomEvent`](CustomEvent.md)
+
+## Properties
+
+### model?
+
+```ts
+optional model: string;
+```
+
+Defined in: [types.ts:629](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L629)
+
+TanStack AI addition: Model identifier for multi-model support
+
+***
+
+### rawEvent?
+
+```ts
+optional rawEvent: unknown;
+```
+
+Defined in: [types.ts:631](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L631)
+
+Original provider event for debugging/advanced use cases
+
+***
+
+### timestamp
+
+```ts
+timestamp: number;
+```
+
+Defined in: [types.ts:627](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L627)
+
+***
+
+### type
+
+```ts
+type: EventType;
+```
+
+Defined in: [types.ts:626](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L626)
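+
+Because `type` is the discriminant shared by every event, a single equality check narrows a `StreamChunk` to its concrete interface. A minimal sketch:
+
+```ts
+function describe(event: StreamChunk): string {
+  if (event.type === 'RUN_ERROR') {
+    return `error: ${event.error.message}` // narrowed to RunErrorEvent
+  }
+  return event.type
+}
+```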
diff --git a/docs/reference/interfaces/BaseStreamChunk.md b/docs/reference/interfaces/BaseStreamChunk.md
deleted file mode 100644
index 81481d16..00000000
--- a/docs/reference/interfaces/BaseStreamChunk.md
+++ /dev/null
@@ -1,59 +0,0 @@
----
-id: BaseStreamChunk
-title: BaseStreamChunk
----
-
-# Interface: BaseStreamChunk
-
-Defined in: [types.ts:594](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L594)
-
-## Extended by
-
-- [`ContentStreamChunk`](ContentStreamChunk.md)
-- [`ToolCallStreamChunk`](ToolCallStreamChunk.md)
-- [`ToolResultStreamChunk`](ToolResultStreamChunk.md)
-- [`DoneStreamChunk`](DoneStreamChunk.md)
-- [`ErrorStreamChunk`](ErrorStreamChunk.md)
-- [`ApprovalRequestedStreamChunk`](ApprovalRequestedStreamChunk.md)
-- [`ToolInputAvailableStreamChunk`](ToolInputAvailableStreamChunk.md)
-- [`ThinkingStreamChunk`](ThinkingStreamChunk.md)
-
-## Properties
-
-### id
-
-```ts
-id: string;
-```
-
-Defined in: [types.ts:596](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L596)
-
-***
-
-### model
-
-```ts
-model: string;
-```
-
-Defined in: [types.ts:597](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L597)
-
-***
-
-### timestamp
-
-```ts
-timestamp: number;
-```
-
-Defined in: [types.ts:598](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L598)
-
-***
-
-### type
-
-```ts
-type: StreamChunkType;
-```
-
-Defined in: [types.ts:595](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L595)
diff --git a/docs/reference/interfaces/ChatCompletionChunk.md b/docs/reference/interfaces/ChatCompletionChunk.md
index 78235a12..3e4ac8ec 100644
--- a/docs/reference/interfaces/ChatCompletionChunk.md
+++ b/docs/reference/interfaces/ChatCompletionChunk.md
@@ -5,7 +5,7 @@ title: ChatCompletionChunk
# Interface: ChatCompletionChunk
-Defined in: [types.ts:684](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L684)
+Defined in: [types.ts:946](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L946)
## Properties
@@ -15,7 +15,7 @@ Defined in: [types.ts:684](https://github.com/TanStack/ai/blob/main/packages/typ
content: string;
```
-Defined in: [types.ts:687](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L687)
+Defined in: [types.ts:949](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L949)
***
@@ -25,7 +25,7 @@ Defined in: [types.ts:687](https://github.com/TanStack/ai/blob/main/packages/typ
optional finishReason: "length" | "stop" | "content_filter" | null;
```
-Defined in: [types.ts:689](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L689)
+Defined in: [types.ts:951](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L951)
***
@@ -35,7 +35,7 @@ Defined in: [types.ts:689](https://github.com/TanStack/ai/blob/main/packages/typ
id: string;
```
-Defined in: [types.ts:685](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L685)
+Defined in: [types.ts:947](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L947)
***
@@ -45,7 +45,7 @@ Defined in: [types.ts:685](https://github.com/TanStack/ai/blob/main/packages/typ
model: string;
```
-Defined in: [types.ts:686](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L686)
+Defined in: [types.ts:948](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L948)
***
@@ -55,7 +55,7 @@ Defined in: [types.ts:686](https://github.com/TanStack/ai/blob/main/packages/typ
optional role: "assistant";
```
-Defined in: [types.ts:688](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L688)
+Defined in: [types.ts:950](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L950)
***
@@ -65,7 +65,7 @@ Defined in: [types.ts:688](https://github.com/TanStack/ai/blob/main/packages/typ
optional usage: object;
```
-Defined in: [types.ts:690](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L690)
+Defined in: [types.ts:952](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L952)
#### completionTokens
diff --git a/docs/reference/interfaces/ContentStreamChunk.md b/docs/reference/interfaces/ContentStreamChunk.md
index e03782a4..626e564b 100644
--- a/docs/reference/interfaces/ContentStreamChunk.md
+++ b/docs/reference/interfaces/ContentStreamChunk.md
@@ -3,96 +3,86 @@ id: ContentStreamChunk
title: ContentStreamChunk
---
-# Interface: ContentStreamChunk
+# ~~Interface: ContentStreamChunk~~
-Defined in: [types.ts:601](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L601)
+Defined in: [types.ts:833](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L833)
-## Extends
+## Deprecated
-- [`BaseStreamChunk`](BaseStreamChunk.md)
+Use TextMessageContentEvent instead
## Properties
-### content
+### ~~content~~
```ts
content: string;
```
-Defined in: [types.ts:604](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L604)
+Defined in: [types.ts:841](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L841)
+
+Full accumulated content so far
***
-### delta
+### ~~delta~~
```ts
delta: string;
```
-Defined in: [types.ts:603](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L603)
+Defined in: [types.ts:839](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L839)
+
+Incremental text delta
***
-### id
+### ~~id~~
```ts
id: string;
```
-Defined in: [types.ts:596](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L596)
-
-#### Inherited from
-
-[`BaseStreamChunk`](BaseStreamChunk.md).[`id`](BaseStreamChunk.md#id)
+Defined in: [types.ts:835](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L835)
***
-### model
+### ~~model~~
```ts
model: string;
```
-Defined in: [types.ts:597](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L597)
-
-#### Inherited from
-
-[`BaseStreamChunk`](BaseStreamChunk.md).[`model`](BaseStreamChunk.md#model)
+Defined in: [types.ts:836](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L836)
***
-### role?
+### ~~role?~~
```ts
optional role: "assistant";
```
-Defined in: [types.ts:605](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L605)
+Defined in: [types.ts:843](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L843)
+
+Role of the message
***
-### timestamp
+### ~~timestamp~~
```ts
timestamp: number;
```
-Defined in: [types.ts:598](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L598)
-
-#### Inherited from
-
-[`BaseStreamChunk`](BaseStreamChunk.md).[`timestamp`](BaseStreamChunk.md#timestamp)
+Defined in: [types.ts:837](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L837)
***
-### type
+### ~~type~~
```ts
type: "content";
```
-Defined in: [types.ts:602](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L602)
-
-#### Overrides
-
-[`BaseStreamChunk`](BaseStreamChunk.md).[`type`](BaseStreamChunk.md#type)
+Defined in: [types.ts:834](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L834)
diff --git a/docs/reference/interfaces/CustomEvent.md b/docs/reference/interfaces/CustomEvent.md
new file mode 100644
index 00000000..603163ae
--- /dev/null
+++ b/docs/reference/interfaces/CustomEvent.md
@@ -0,0 +1,95 @@
+---
+id: CustomEvent
+title: CustomEvent
+---
+
+# Interface: CustomEvent
+
+Defined in: [types.ts:783](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L783)
+
+Custom event for extensibility.
+Used for features not covered by standard AG-UI events (e.g., approval flows).
+
+## Extends
+
+- [`BaseEvent`](BaseEvent.md)
+
+## Properties
+
+### model?
+
+```ts
+optional model: string;
+```
+
+Defined in: [types.ts:629](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L629)
+
+TanStack AI addition: Model identifier for multi-model support
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`model`](BaseEvent.md#model)
+
+***
+
+### name
+
+```ts
+name: string;
+```
+
+Defined in: [types.ts:785](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L785)
+
+***
+
+### rawEvent?
+
+```ts
+optional rawEvent: unknown;
+```
+
+Defined in: [types.ts:631](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L631)
+
+Original provider event for debugging/advanced use cases
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`rawEvent`](BaseEvent.md#rawevent)
+
+***
+
+### timestamp
+
+```ts
+timestamp: number;
+```
+
+Defined in: [types.ts:627](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L627)
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`timestamp`](BaseEvent.md#timestamp)
+
+***
+
+### type
+
+```ts
+type: "CUSTOM";
+```
+
+Defined in: [types.ts:784](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L784)
+
+#### Overrides
+
+[`BaseEvent`](BaseEvent.md).[`type`](BaseEvent.md#type)
+
+***
+
+### value
+
+```ts
+value: unknown;
+```
+
+Defined in: [types.ts:786](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L786)
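+
+For example, the deprecated approval-requested chunk is now expressed as a custom event. A minimal handling sketch (the payload shape is whatever the emitter placed in `value`):
+
+```ts
+if (event.type === 'CUSTOM' && event.name === 'approval-requested') {
+  const payload = event.value // inspect before approving the tool call
+}
+```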
diff --git a/docs/reference/interfaces/DefaultMessageMetadataByModality.md b/docs/reference/interfaces/DefaultMessageMetadataByModality.md
index d13b5888..688d91ae 100644
--- a/docs/reference/interfaces/DefaultMessageMetadataByModality.md
+++ b/docs/reference/interfaces/DefaultMessageMetadataByModality.md
@@ -5,7 +5,7 @@ title: DefaultMessageMetadataByModality
# Interface: DefaultMessageMetadataByModality
-Defined in: [types.ts:736](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L736)
+Defined in: [types.ts:998](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L998)
Default metadata type for adapters that don't define custom metadata.
Uses unknown for all modalities.
@@ -18,7 +18,7 @@ Uses unknown for all modalities.
audio: unknown;
```
-Defined in: [types.ts:739](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L739)
+Defined in: [types.ts:1001](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1001)
***
@@ -28,7 +28,7 @@ Defined in: [types.ts:739](https://github.com/TanStack/ai/blob/main/packages/typ
document: unknown;
```
-Defined in: [types.ts:741](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L741)
+Defined in: [types.ts:1003](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1003)
***
@@ -38,7 +38,7 @@ Defined in: [types.ts:741](https://github.com/TanStack/ai/blob/main/packages/typ
image: unknown;
```
-Defined in: [types.ts:738](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L738)
+Defined in: [types.ts:1000](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1000)
***
@@ -48,7 +48,7 @@ Defined in: [types.ts:738](https://github.com/TanStack/ai/blob/main/packages/typ
text: unknown;
```
-Defined in: [types.ts:737](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L737)
+Defined in: [types.ts:999](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L999)
***
@@ -58,4 +58,4 @@ Defined in: [types.ts:737](https://github.com/TanStack/ai/blob/main/packages/typ
video: unknown;
```
-Defined in: [types.ts:740](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L740)
+Defined in: [types.ts:1002](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1002)
diff --git a/docs/reference/interfaces/DoneStreamChunk.md b/docs/reference/interfaces/DoneStreamChunk.md
index a62e11e8..05a287ce 100644
--- a/docs/reference/interfaces/DoneStreamChunk.md
+++ b/docs/reference/interfaces/DoneStreamChunk.md
@@ -3,103 +3,87 @@ id: DoneStreamChunk
title: DoneStreamChunk
---
-# Interface: DoneStreamChunk
+# ~~Interface: DoneStreamChunk~~
-Defined in: [types.ts:627](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L627)
+Defined in: [types.ts:849](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L849)
-## Extends
+## Deprecated
-- [`BaseStreamChunk`](BaseStreamChunk.md)
+Use RunFinishedEvent instead
## Properties
-### finishReason
+### ~~finishReason?~~
```ts
-finishReason: "length" | "stop" | "content_filter" | "tool_calls" | null;
+optional finishReason: "length" | "stop" | "content_filter" | "tool_calls" | null;
```
-Defined in: [types.ts:629](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L629)
+Defined in: [types.ts:854](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L854)
***
-### id
+### ~~id~~
```ts
id: string;
```
-Defined in: [types.ts:596](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L596)
-
-#### Inherited from
-
-[`BaseStreamChunk`](BaseStreamChunk.md).[`id`](BaseStreamChunk.md#id)
+Defined in: [types.ts:851](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L851)
***
-### model
+### ~~model~~
```ts
model: string;
```
-Defined in: [types.ts:597](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L597)
-
-#### Inherited from
-
-[`BaseStreamChunk`](BaseStreamChunk.md).[`model`](BaseStreamChunk.md#model)
+Defined in: [types.ts:852](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L852)
***
-### timestamp
+### ~~timestamp~~
```ts
timestamp: number;
```
-Defined in: [types.ts:598](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L598)
-
-#### Inherited from
-
-[`BaseStreamChunk`](BaseStreamChunk.md).[`timestamp`](BaseStreamChunk.md#timestamp)
+Defined in: [types.ts:853](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L853)
***
-### type
+### ~~type~~
```ts
type: "done";
```
-Defined in: [types.ts:628](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L628)
-
-#### Overrides
-
-[`BaseStreamChunk`](BaseStreamChunk.md).[`type`](BaseStreamChunk.md#type)
+Defined in: [types.ts:850](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L850)
***
-### usage?
+### ~~usage?~~
```ts
optional usage: object;
```
-Defined in: [types.ts:630](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L630)
+Defined in: [types.ts:855](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L855)
-#### completionTokens
+#### ~~completionTokens~~
```ts
completionTokens: number;
```
-#### promptTokens
+#### ~~promptTokens~~
```ts
promptTokens: number;
```
-#### totalTokens
+#### ~~totalTokens~~
```ts
totalTokens: number;
diff --git a/docs/reference/interfaces/EmbeddingOptions.md b/docs/reference/interfaces/EmbeddingOptions.md
index 471035f6..759a0c58 100644
--- a/docs/reference/interfaces/EmbeddingOptions.md
+++ b/docs/reference/interfaces/EmbeddingOptions.md
@@ -5,7 +5,7 @@ title: EmbeddingOptions
# Interface: EmbeddingOptions
-Defined in: [types.ts:716](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L716)
+Defined in: [types.ts:978](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L978)
## Properties
@@ -15,7 +15,7 @@ Defined in: [types.ts:716](https://github.com/TanStack/ai/blob/main/packages/typ
optional dimensions: number;
```
-Defined in: [types.ts:719](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L719)
+Defined in: [types.ts:981](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L981)
***
@@ -25,7 +25,7 @@ Defined in: [types.ts:719](https://github.com/TanStack/ai/blob/main/packages/typ
input: string | string[];
```
-Defined in: [types.ts:718](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L718)
+Defined in: [types.ts:980](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L980)
***
@@ -35,4 +35,4 @@ Defined in: [types.ts:718](https://github.com/TanStack/ai/blob/main/packages/typ
model: string;
```
-Defined in: [types.ts:717](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L717)
+Defined in: [types.ts:979](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L979)
diff --git a/docs/reference/interfaces/EmbeddingResult.md b/docs/reference/interfaces/EmbeddingResult.md
index 39e385e4..2884de0f 100644
--- a/docs/reference/interfaces/EmbeddingResult.md
+++ b/docs/reference/interfaces/EmbeddingResult.md
@@ -5,7 +5,7 @@ title: EmbeddingResult
# Interface: EmbeddingResult
-Defined in: [types.ts:722](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L722)
+Defined in: [types.ts:984](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L984)
## Properties
@@ -15,7 +15,7 @@ Defined in: [types.ts:722](https://github.com/TanStack/ai/blob/main/packages/typ
embeddings: number[][];
```
-Defined in: [types.ts:725](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L725)
+Defined in: [types.ts:987](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L987)
***
@@ -25,7 +25,7 @@ Defined in: [types.ts:725](https://github.com/TanStack/ai/blob/main/packages/typ
id: string;
```
-Defined in: [types.ts:723](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L723)
+Defined in: [types.ts:985](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L985)
***
@@ -35,7 +35,7 @@ Defined in: [types.ts:723](https://github.com/TanStack/ai/blob/main/packages/typ
model: string;
```
-Defined in: [types.ts:724](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L724)
+Defined in: [types.ts:986](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L986)
***
@@ -45,7 +45,7 @@ Defined in: [types.ts:724](https://github.com/TanStack/ai/blob/main/packages/typ
usage: object;
```
-Defined in: [types.ts:726](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L726)
+Defined in: [types.ts:988](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L988)
#### promptTokens
diff --git a/docs/reference/interfaces/ErrorStreamChunk.md b/docs/reference/interfaces/ErrorStreamChunk.md
index 57cb491d..5ade040b 100644
--- a/docs/reference/interfaces/ErrorStreamChunk.md
+++ b/docs/reference/interfaces/ErrorStreamChunk.md
@@ -3,88 +3,75 @@ id: ErrorStreamChunk
title: ErrorStreamChunk
---
-# Interface: ErrorStreamChunk
+# ~~Interface: ErrorStreamChunk~~
-Defined in: [types.ts:637](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L637)
+Defined in: [types.ts:865](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L865)
-## Extends
+## Deprecated
-- [`BaseStreamChunk`](BaseStreamChunk.md)
+Use RunErrorEvent instead
## Properties
-### error
+### ~~code?~~
```ts
-error: object;
+optional code: string;
```
-Defined in: [types.ts:639](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L639)
-
-#### code?
+Defined in: [types.ts:871](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L871)
-```ts
-optional code: string;
-```
+***
-#### message
+### ~~error~~
```ts
-message: string;
+error:
+ | string
+ | {
+ code?: string;
+ message: string;
+};
```
+Defined in: [types.ts:870](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L870)
+
***
-### id
+### ~~id~~
```ts
id: string;
```
-Defined in: [types.ts:596](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L596)
-
-#### Inherited from
-
-[`BaseStreamChunk`](BaseStreamChunk.md).[`id`](BaseStreamChunk.md#id)
+Defined in: [types.ts:867](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L867)
***
-### model
+### ~~model~~
```ts
model: string;
```
-Defined in: [types.ts:597](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L597)
-
-#### Inherited from
-
-[`BaseStreamChunk`](BaseStreamChunk.md).[`model`](BaseStreamChunk.md#model)
+Defined in: [types.ts:868](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L868)
***
-### timestamp
+### ~~timestamp~~
```ts
timestamp: number;
```
-Defined in: [types.ts:598](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L598)
-
-#### Inherited from
-
-[`BaseStreamChunk`](BaseStreamChunk.md).[`timestamp`](BaseStreamChunk.md#timestamp)
+Defined in: [types.ts:869](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L869)
***
-### type
+### ~~type~~
```ts
type: "error";
```
-Defined in: [types.ts:638](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L638)
-
-#### Overrides
-
-[`BaseStreamChunk`](BaseStreamChunk.md).[`type`](BaseStreamChunk.md#type)
+Defined in: [types.ts:866](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L866)
diff --git a/docs/reference/interfaces/RunErrorEvent.md b/docs/reference/interfaces/RunErrorEvent.md
new file mode 100644
index 00000000..95f8d0ef
--- /dev/null
+++ b/docs/reference/interfaces/RunErrorEvent.md
@@ -0,0 +1,106 @@
+---
+id: RunErrorEvent
+title: RunErrorEvent
+---
+
+# Interface: RunErrorEvent
+
+Defined in: [types.ts:660](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L660)
+
+Emitted when an error occurs during a run.
+
+## Extends
+
+- [`BaseEvent`](BaseEvent.md)
+
+## Properties
+
+### error
+
+```ts
+error: object;
+```
+
+Defined in: [types.ts:663](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L663)
+
+#### code?
+
+```ts
+optional code: string;
+```
+
+#### message
+
+```ts
+message: string;
+```
+
+***
+
+### model?
+
+```ts
+optional model: string;
+```
+
+Defined in: [types.ts:629](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L629)
+
+TanStack AI addition: Model identifier for multi-model support
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`model`](BaseEvent.md#model)
+
+***
+
+### rawEvent?
+
+```ts
+optional rawEvent: unknown;
+```
+
+Defined in: [types.ts:631](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L631)
+
+Original provider event for debugging/advanced use cases
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`rawEvent`](BaseEvent.md#rawevent)
+
+***
+
+### runId?
+
+```ts
+optional runId: string;
+```
+
+Defined in: [types.ts:662](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L662)
+
+***
+
+### timestamp
+
+```ts
+timestamp: number;
+```
+
+Defined in: [types.ts:627](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L627)
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`timestamp`](BaseEvent.md#timestamp)
+
+***
+
+### type
+
+```ts
+type: "RUN_ERROR";
+```
+
+Defined in: [types.ts:661](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L661)
+
+#### Overrides
+
+[`BaseEvent`](BaseEvent.md).[`type`](BaseEvent.md#type)
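+
+A minimal handling sketch using only the documented fields (`runId` is optional, so guard it):
+
+```ts
+if (event.type === 'RUN_ERROR') {
+  console.error(
+    `Run ${event.runId ?? 'unknown'} failed: ${event.error.message}`,
+    event.error.code ?? '',
+  )
+}
+```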
diff --git a/docs/reference/interfaces/RunFinishedEvent.md b/docs/reference/interfaces/RunFinishedEvent.md
new file mode 100644
index 00000000..3d2876e5
--- /dev/null
+++ b/docs/reference/interfaces/RunFinishedEvent.md
@@ -0,0 +1,122 @@
+---
+id: RunFinishedEvent
+title: RunFinishedEvent
+---
+
+# Interface: RunFinishedEvent
+
+Defined in: [types.ts:646](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L646)
+
+Emitted when a run completes successfully.
+
+## Extends
+
+- [`BaseEvent`](BaseEvent.md)
+
+## Properties
+
+### finishReason
+
+```ts
+finishReason: "length" | "stop" | "content_filter" | "tool_calls" | null;
+```
+
+Defined in: [types.ts:649](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L649)
+
+***
+
+### model?
+
+```ts
+optional model: string;
+```
+
+Defined in: [types.ts:629](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L629)
+
+TanStack AI addition: Model identifier for multi-model support
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`model`](BaseEvent.md#model)
+
+***
+
+### rawEvent?
+
+```ts
+optional rawEvent: unknown;
+```
+
+Defined in: [types.ts:631](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L631)
+
+Original provider event for debugging/advanced use cases
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`rawEvent`](BaseEvent.md#rawevent)
+
+***
+
+### runId
+
+```ts
+runId: string;
+```
+
+Defined in: [types.ts:648](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L648)
+
+***
+
+### timestamp
+
+```ts
+timestamp: number;
+```
+
+Defined in: [types.ts:627](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L627)
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`timestamp`](BaseEvent.md#timestamp)
+
+***
+
+### type
+
+```ts
+type: "RUN_FINISHED";
+```
+
+Defined in: [types.ts:647](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L647)
+
+#### Overrides
+
+[`BaseEvent`](BaseEvent.md).[`type`](BaseEvent.md#type)
+
+***
+
+### usage?
+
+```ts
+optional usage: object;
+```
+
+Defined in: [types.ts:650](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L650)
+
+#### completionTokens
+
+```ts
+completionTokens: number;
+```
+
+#### promptTokens
+
+```ts
+promptTokens: number;
+```
+
+#### totalTokens
+
+```ts
+totalTokens: number;
+```
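+
+A minimal sketch for reading the final stats (usage is optional, so guard it):
+
+```ts
+if (event.type === 'RUN_FINISHED') {
+  if (event.finishReason === 'tool_calls') {
+    // the model stopped to call tools; another round will follow
+  }
+  console.log(`total tokens: ${event.usage?.totalTokens ?? 'n/a'}`)
+}
+```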
diff --git a/docs/reference/interfaces/RunStartedEvent.md b/docs/reference/interfaces/RunStartedEvent.md
new file mode 100644
index 00000000..3e976bb2
--- /dev/null
+++ b/docs/reference/interfaces/RunStartedEvent.md
@@ -0,0 +1,94 @@
+---
+id: RunStartedEvent
+title: RunStartedEvent
+---
+
+# Interface: RunStartedEvent
+
+Defined in: [types.ts:637](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L637)
+
+Emitted when a run starts.
+
+## Extends
+
+- [`BaseEvent`](BaseEvent.md)
+
+## Properties
+
+### model?
+
+```ts
+optional model: string;
+```
+
+Defined in: [types.ts:629](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L629)
+
+TanStack AI addition: Model identifier for multi-model support
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`model`](BaseEvent.md#model)
+
+***
+
+### rawEvent?
+
+```ts
+optional rawEvent: unknown;
+```
+
+Defined in: [types.ts:631](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L631)
+
+Original provider event for debugging/advanced use cases
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`rawEvent`](BaseEvent.md#rawevent)
+
+***
+
+### runId
+
+```ts
+runId: string;
+```
+
+Defined in: [types.ts:639](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L639)
+
+***
+
+### threadId?
+
+```ts
+optional threadId: string;
+```
+
+Defined in: [types.ts:640](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L640)
+
+***
+
+### timestamp
+
+```ts
+timestamp: number;
+```
+
+Defined in: [types.ts:627](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L627)
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`timestamp`](BaseEvent.md#timestamp)
+
+***
+
+### type
+
+```ts
+type: "RUN_STARTED";
+```
+
+Defined in: [types.ts:638](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L638)
+
+#### Overrides
+
+[`BaseEvent`](BaseEvent.md).[`type`](BaseEvent.md#type)
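+
+A minimal sketch for correlating later events with their run:
+
+```ts
+let currentRunId: string | undefined
+
+if (event.type === 'RUN_STARTED') {
+  currentRunId = event.runId // threadId, when present, groups runs in a conversation
+}
+```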
diff --git a/docs/reference/interfaces/StateDeltaEvent.md b/docs/reference/interfaces/StateDeltaEvent.md
new file mode 100644
index 00000000..3c429088
--- /dev/null
+++ b/docs/reference/interfaces/StateDeltaEvent.md
@@ -0,0 +1,102 @@
+---
+id: StateDeltaEvent
+title: StateDeltaEvent
+---
+
+# Interface: StateDeltaEvent
+
+Defined in: [types.ts:770](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L770)
+
+Emitted for incremental state updates.
+
+## Extends
+
+- [`BaseEvent`](BaseEvent.md)
+
+## Properties
+
+### delta
+
+```ts
+delta: object[];
+```
+
+Defined in: [types.ts:772](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L772)
+
+#### op
+
+```ts
+op: "add" | "remove" | "replace";
+```
+
+#### path
+
+```ts
+path: string;
+```
+
+#### value?
+
+```ts
+optional value: unknown;
+```
+
+***
+
+### model?
+
+```ts
+optional model: string;
+```
+
+Defined in: [types.ts:629](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L629)
+
+TanStack AI addition: Model identifier for multi-model support
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`model`](BaseEvent.md#model)
+
+***
+
+### rawEvent?
+
+```ts
+optional rawEvent: unknown;
+```
+
+Defined in: [types.ts:631](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L631)
+
+Original provider event for debugging/advanced use cases
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`rawEvent`](BaseEvent.md#rawevent)
+
+***
+
+### timestamp
+
+```ts
+timestamp: number;
+```
+
+Defined in: [types.ts:627](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L627)
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`timestamp`](BaseEvent.md#timestamp)
+
+***
+
+### type
+
+```ts
+type: "STATE_DELTA";
+```
+
+Defined in: [types.ts:771](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L771)
+
+#### Overrides
+
+[`BaseEvent`](BaseEvent.md).[`type`](BaseEvent.md#type)
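+
+## Example
+
+A minimal sketch of applying a delta to local state. It assumes the `op`/`path`/`value` entries follow JSON-Patch-style semantics with slash-delimited paths (e.g. `/todos/0/title`):
+
+```ts
+function applyDelta(
+  state: Record<string, unknown>,
+  delta: Array<{ op: 'add' | 'remove' | 'replace'; path: string; value?: unknown }>,
+): Record<string, unknown> {
+  const next = structuredClone(state);
+  for (const { op, path, value } of delta) {
+    const keys = path.split('/').filter(Boolean);
+    const last = keys.pop()!;
+    // Walk to the parent container of the target key
+    let parent: any = next;
+    for (const key of keys) parent = parent[key];
+    if (op === 'remove') delete parent[last];
+    else parent[last] = value; // 'add' and 'replace' both assign
+  }
+  return next;
+}
+```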
diff --git a/docs/reference/interfaces/StateSnapshotEvent.md b/docs/reference/interfaces/StateSnapshotEvent.md
new file mode 100644
index 00000000..2dd69737
--- /dev/null
+++ b/docs/reference/interfaces/StateSnapshotEvent.md
@@ -0,0 +1,84 @@
+---
+id: StateSnapshotEvent
+title: StateSnapshotEvent
+---
+
+# Interface: StateSnapshotEvent
+
+Defined in: [types.ts:762](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L762)
+
+Emitted for full state synchronization.
+
+## Extends
+
+- [`BaseEvent`](BaseEvent.md)
+
+## Properties
+
+### model?
+
+```ts
+optional model: string;
+```
+
+Defined in: [types.ts:629](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L629)
+
+TanStack AI addition: Model identifier for multi-model support
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`model`](BaseEvent.md#model)
+
+***
+
+### rawEvent?
+
+```ts
+optional rawEvent: unknown;
+```
+
+Defined in: [types.ts:631](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L631)
+
+Original provider event for debugging/advanced use cases
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`rawEvent`](BaseEvent.md#rawevent)
+
+***
+
+### state
+
+```ts
+state: Record<string, unknown>;
+```
+
+Defined in: [types.ts:764](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L764)
+
+***
+
+### timestamp
+
+```ts
+timestamp: number;
+```
+
+Defined in: [types.ts:627](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L627)
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`timestamp`](BaseEvent.md#timestamp)
+
+***
+
+### type
+
+```ts
+type: "STATE_SNAPSHOT";
+```
+
+Defined in: [types.ts:763](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L763)
+
+#### Overrides
+
+[`BaseEvent`](BaseEvent.md).[`type`](BaseEvent.md#type)
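+
+## Example
+
+A minimal sketch, assuming `stream` is an `AsyncIterable<StreamChunk>`. Unlike `STATE_DELTA`, a snapshot carries the full state, so local state is replaced wholesale:
+
+```ts
+let uiState: Record<string, unknown> = {};
+
+for await (const event of stream) {
+  if (event.type === 'STATE_SNAPSHOT') {
+    // Replace rather than patch: the snapshot is authoritative
+    uiState = event.state;
+  }
+}
+```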
diff --git a/docs/reference/interfaces/StepFinishedEvent.md b/docs/reference/interfaces/StepFinishedEvent.md
new file mode 100644
index 00000000..3a1d39ae
--- /dev/null
+++ b/docs/reference/interfaces/StepFinishedEvent.md
@@ -0,0 +1,108 @@
+---
+id: StepFinishedEvent
+title: StepFinishedEvent
+---
+
+# Interface: StepFinishedEvent
+
+Defined in: [types.ts:750](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L750)
+
+Emitted when a reasoning/thinking step completes or streams content.
+
+## Extends
+
+- [`BaseEvent`](BaseEvent.md)
+
+## Properties
+
+### content
+
+```ts
+content: string;
+```
+
+Defined in: [types.ts:756](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L756)
+
+Full accumulated thinking content
+
+***
+
+### delta?
+
+```ts
+optional delta: string;
+```
+
+Defined in: [types.ts:754](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L754)
+
+Incremental thinking token
+
+***
+
+### model?
+
+```ts
+optional model: string;
+```
+
+Defined in: [types.ts:629](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L629)
+
+TanStack AI addition: Model identifier for multi-model support
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`model`](BaseEvent.md#model)
+
+***
+
+### rawEvent?
+
+```ts
+optional rawEvent: unknown;
+```
+
+Defined in: [types.ts:631](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L631)
+
+Original provider event for debugging/advanced use cases
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`rawEvent`](BaseEvent.md#rawevent)
+
+***
+
+### stepId
+
+```ts
+stepId: string;
+```
+
+Defined in: [types.ts:752](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L752)
+
+***
+
+### timestamp
+
+```ts
+timestamp: number;
+```
+
+Defined in: [types.ts:627](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L627)
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`timestamp`](BaseEvent.md#timestamp)
+
+***
+
+### type
+
+```ts
+type: "STEP_FINISHED";
+```
+
+Defined in: [types.ts:751](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L751)
+
+#### Overrides
+
+[`BaseEvent`](BaseEvent.md).[`type`](BaseEvent.md#type)
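+
+## Example
+
+A minimal sketch (Node) of streaming thinking tokens to the console, assuming `stream` is an `AsyncIterable<StreamChunk>`:
+
+```ts
+for await (const event of stream) {
+  if (event.type === 'STEP_FINISHED') {
+    // Prefer the incremental delta when present; content always holds
+    // the full accumulated thinking text
+    if (event.delta !== undefined) process.stdout.write(event.delta);
+    else console.log(event.content);
+  }
+}
+```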
diff --git a/docs/reference/interfaces/StepStartedEvent.md b/docs/reference/interfaces/StepStartedEvent.md
new file mode 100644
index 00000000..b5cce5f6
--- /dev/null
+++ b/docs/reference/interfaces/StepStartedEvent.md
@@ -0,0 +1,94 @@
+---
+id: StepStartedEvent
+title: StepStartedEvent
+---
+
+# Interface: StepStartedEvent
+
+Defined in: [types.ts:741](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L741)
+
+Emitted when a reasoning/thinking step starts.
+
+## Extends
+
+- [`BaseEvent`](BaseEvent.md)
+
+## Properties
+
+### model?
+
+```ts
+optional model: string;
+```
+
+Defined in: [types.ts:629](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L629)
+
+TanStack AI addition: Model identifier for multi-model support
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`model`](BaseEvent.md#model)
+
+***
+
+### rawEvent?
+
+```ts
+optional rawEvent: unknown;
+```
+
+Defined in: [types.ts:631](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L631)
+
+Original provider event for debugging/advanced use cases
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`rawEvent`](BaseEvent.md#rawevent)
+
+***
+
+### stepId
+
+```ts
+stepId: string;
+```
+
+Defined in: [types.ts:743](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L743)
+
+***
+
+### stepType
+
+```ts
+stepType: "thinking" | "reasoning" | "planning";
+```
+
+Defined in: [types.ts:744](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L744)
+
+***
+
+### timestamp
+
+```ts
+timestamp: number;
+```
+
+Defined in: [types.ts:627](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L627)
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`timestamp`](BaseEvent.md#timestamp)
+
+***
+
+### type
+
+```ts
+type: "STEP_STARTED";
+```
+
+Defined in: [types.ts:742](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L742)
+
+#### Overrides
+
+[`BaseEvent`](BaseEvent.md).[`type`](BaseEvent.md#type)
diff --git a/docs/reference/interfaces/SummarizationOptions.md b/docs/reference/interfaces/SummarizationOptions.md
index 575580e2..f009ab58 100644
--- a/docs/reference/interfaces/SummarizationOptions.md
+++ b/docs/reference/interfaces/SummarizationOptions.md
@@ -5,7 +5,7 @@ title: SummarizationOptions
# Interface: SummarizationOptions
-Defined in: [types.ts:697](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L697)
+Defined in: [types.ts:959](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L959)
## Properties
@@ -15,7 +15,7 @@ Defined in: [types.ts:697](https://github.com/TanStack/ai/blob/main/packages/typ
optional focus: string[];
```
-Defined in: [types.ts:702](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L702)
+Defined in: [types.ts:964](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L964)
***
@@ -25,7 +25,7 @@ Defined in: [types.ts:702](https://github.com/TanStack/ai/blob/main/packages/typ
optional maxLength: number;
```
-Defined in: [types.ts:700](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L700)
+Defined in: [types.ts:962](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L962)
***
@@ -35,7 +35,7 @@ Defined in: [types.ts:700](https://github.com/TanStack/ai/blob/main/packages/typ
model: string;
```
-Defined in: [types.ts:698](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L698)
+Defined in: [types.ts:960](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L960)
***
@@ -45,7 +45,7 @@ Defined in: [types.ts:698](https://github.com/TanStack/ai/blob/main/packages/typ
optional style: "bullet-points" | "paragraph" | "concise";
```
-Defined in: [types.ts:701](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L701)
+Defined in: [types.ts:963](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L963)
***
@@ -55,4 +55,4 @@ Defined in: [types.ts:701](https://github.com/TanStack/ai/blob/main/packages/typ
text: string;
```
-Defined in: [types.ts:699](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L699)
+Defined in: [types.ts:961](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L961)
diff --git a/docs/reference/interfaces/SummarizationResult.md b/docs/reference/interfaces/SummarizationResult.md
index 390814d8..1dcaa38e 100644
--- a/docs/reference/interfaces/SummarizationResult.md
+++ b/docs/reference/interfaces/SummarizationResult.md
@@ -5,7 +5,7 @@ title: SummarizationResult
# Interface: SummarizationResult
-Defined in: [types.ts:705](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L705)
+Defined in: [types.ts:967](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L967)
## Properties
@@ -15,7 +15,7 @@ Defined in: [types.ts:705](https://github.com/TanStack/ai/blob/main/packages/typ
id: string;
```
-Defined in: [types.ts:706](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L706)
+Defined in: [types.ts:968](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L968)
***
@@ -25,7 +25,7 @@ Defined in: [types.ts:706](https://github.com/TanStack/ai/blob/main/packages/typ
model: string;
```
-Defined in: [types.ts:707](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L707)
+Defined in: [types.ts:969](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L969)
***
@@ -35,7 +35,7 @@ Defined in: [types.ts:707](https://github.com/TanStack/ai/blob/main/packages/typ
summary: string;
```
-Defined in: [types.ts:708](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L708)
+Defined in: [types.ts:970](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L970)
***
@@ -45,7 +45,7 @@ Defined in: [types.ts:708](https://github.com/TanStack/ai/blob/main/packages/typ
usage: object;
```
-Defined in: [types.ts:709](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L709)
+Defined in: [types.ts:971](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L971)
#### completionTokens
diff --git a/docs/reference/interfaces/TextMessageContentEvent.md b/docs/reference/interfaces/TextMessageContentEvent.md
new file mode 100644
index 00000000..0e4274c4
--- /dev/null
+++ b/docs/reference/interfaces/TextMessageContentEvent.md
@@ -0,0 +1,106 @@
+---
+id: TextMessageContentEvent
+title: TextMessageContentEvent
+---
+
+# Interface: TextMessageContentEvent
+
+Defined in: [types.ts:681](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L681)
+
+Emitted when text content is generated (streaming tokens).
+
+## Extends
+
+- [`BaseEvent`](BaseEvent.md)
+
+## Properties
+
+### content?
+
+```ts
+optional content: string;
+```
+
+Defined in: [types.ts:686](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L686)
+
+TanStack AI addition: Full accumulated content so far
+
+***
+
+### delta
+
+```ts
+delta: string;
+```
+
+Defined in: [types.ts:684](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L684)
+
+***
+
+### messageId
+
+```ts
+messageId: string;
+```
+
+Defined in: [types.ts:683](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L683)
+
+***
+
+### model?
+
+```ts
+optional model: string;
+```
+
+Defined in: [types.ts:629](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L629)
+
+TanStack AI addition: Model identifier for multi-model support
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`model`](BaseEvent.md#model)
+
+***
+
+### rawEvent?
+
+```ts
+optional rawEvent: unknown;
+```
+
+Defined in: [types.ts:631](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L631)
+
+Original provider event for debugging/advanced use cases
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`rawEvent`](BaseEvent.md#rawevent)
+
+***
+
+### timestamp
+
+```ts
+timestamp: number;
+```
+
+Defined in: [types.ts:627](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L627)
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`timestamp`](BaseEvent.md#timestamp)
+
+***
+
+### type
+
+```ts
+type: "TEXT_MESSAGE_CONTENT";
+```
+
+Defined in: [types.ts:682](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L682)
+
+#### Overrides
+
+[`BaseEvent`](BaseEvent.md).[`type`](BaseEvent.md#type)
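+
+## Example
+
+A minimal sketch of accumulating streamed text per message, assuming `stream` is an `AsyncIterable<StreamChunk>`:
+
+```ts
+const messages = new Map<string, string>();
+
+for await (const event of stream) {
+  if (event.type === 'TEXT_MESSAGE_CONTENT') {
+    // content (when present) is already accumulated; otherwise append the delta
+    const prev = messages.get(event.messageId) ?? '';
+    messages.set(event.messageId, event.content ?? prev + event.delta);
+  }
+}
+```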
diff --git a/docs/reference/interfaces/TextMessageEndEvent.md b/docs/reference/interfaces/TextMessageEndEvent.md
new file mode 100644
index 00000000..88d072c8
--- /dev/null
+++ b/docs/reference/interfaces/TextMessageEndEvent.md
@@ -0,0 +1,84 @@
+---
+id: TextMessageEndEvent
+title: TextMessageEndEvent
+---
+
+# Interface: TextMessageEndEvent
+
+Defined in: [types.ts:692](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L692)
+
+Emitted when a text message completes.
+
+## Extends
+
+- [`BaseEvent`](BaseEvent.md)
+
+## Properties
+
+### messageId
+
+```ts
+messageId: string;
+```
+
+Defined in: [types.ts:694](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L694)
+
+***
+
+### model?
+
+```ts
+optional model: string;
+```
+
+Defined in: [types.ts:629](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L629)
+
+TanStack AI addition: Model identifier for multi-model support
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`model`](BaseEvent.md#model)
+
+***
+
+### rawEvent?
+
+```ts
+optional rawEvent: unknown;
+```
+
+Defined in: [types.ts:631](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L631)
+
+Original provider event for debugging/advanced use cases
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`rawEvent`](BaseEvent.md#rawevent)
+
+***
+
+### timestamp
+
+```ts
+timestamp: number;
+```
+
+Defined in: [types.ts:627](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L627)
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`timestamp`](BaseEvent.md#timestamp)
+
+***
+
+### type
+
+```ts
+type: "TEXT_MESSAGE_END";
+```
+
+Defined in: [types.ts:693](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L693)
+
+#### Overrides
+
+[`BaseEvent`](BaseEvent.md).[`type`](BaseEvent.md#type)
diff --git a/docs/reference/interfaces/TextMessageStartEvent.md b/docs/reference/interfaces/TextMessageStartEvent.md
new file mode 100644
index 00000000..6a2fefb6
--- /dev/null
+++ b/docs/reference/interfaces/TextMessageStartEvent.md
@@ -0,0 +1,94 @@
+---
+id: TextMessageStartEvent
+title: TextMessageStartEvent
+---
+
+# Interface: TextMessageStartEvent
+
+Defined in: [types.ts:672](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L672)
+
+Emitted when a text message starts.
+
+## Extends
+
+- [`BaseEvent`](BaseEvent.md)
+
+## Properties
+
+### messageId
+
+```ts
+messageId: string;
+```
+
+Defined in: [types.ts:674](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L674)
+
+***
+
+### model?
+
+```ts
+optional model: string;
+```
+
+Defined in: [types.ts:629](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L629)
+
+TanStack AI addition: Model identifier for multi-model support
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`model`](BaseEvent.md#model)
+
+***
+
+### rawEvent?
+
+```ts
+optional rawEvent: unknown;
+```
+
+Defined in: [types.ts:631](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L631)
+
+Original provider event for debugging/advanced use cases
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`rawEvent`](BaseEvent.md#rawevent)
+
+***
+
+### role
+
+```ts
+role: "assistant";
+```
+
+Defined in: [types.ts:675](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L675)
+
+***
+
+### timestamp
+
+```ts
+timestamp: number;
+```
+
+Defined in: [types.ts:627](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L627)
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`timestamp`](BaseEvent.md#timestamp)
+
+***
+
+### type
+
+```ts
+type: "TEXT_MESSAGE_START";
+```
+
+Defined in: [types.ts:673](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L673)
+
+#### Overrides
+
+[`BaseEvent`](BaseEvent.md).[`type`](BaseEvent.md#type)
diff --git a/docs/reference/interfaces/ThinkingStreamChunk.md b/docs/reference/interfaces/ThinkingStreamChunk.md
index d67abbe0..37a6e550 100644
--- a/docs/reference/interfaces/ThinkingStreamChunk.md
+++ b/docs/reference/interfaces/ThinkingStreamChunk.md
@@ -3,86 +3,70 @@ id: ThinkingStreamChunk
title: ThinkingStreamChunk
---
-# Interface: ThinkingStreamChunk
+# ~~Interface: ThinkingStreamChunk~~
-Defined in: [types.ts:663](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L663)
+Defined in: [types.ts:905](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L905)
-## Extends
+## Deprecated
-- [`BaseStreamChunk`](BaseStreamChunk.md)
+Use [`StepStartedEvent`](StepStartedEvent.md)/[`StepFinishedEvent`](StepFinishedEvent.md) instead.
## Properties
-### content
+### ~~content~~
```ts
content: string;
```
-Defined in: [types.ts:666](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L666)
+Defined in: [types.ts:911](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L911)
***
-### delta?
+### ~~delta?~~
```ts
optional delta: string;
```
-Defined in: [types.ts:665](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L665)
+Defined in: [types.ts:910](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L910)
***
-### id
+### ~~id~~
```ts
id: string;
```
-Defined in: [types.ts:596](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L596)
-
-#### Inherited from
-
-[`BaseStreamChunk`](BaseStreamChunk.md).[`id`](BaseStreamChunk.md#id)
+Defined in: [types.ts:907](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L907)
***
-### model
+### ~~model~~
```ts
model: string;
```
-Defined in: [types.ts:597](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L597)
-
-#### Inherited from
-
-[`BaseStreamChunk`](BaseStreamChunk.md).[`model`](BaseStreamChunk.md#model)
+Defined in: [types.ts:908](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L908)
***
-### timestamp
+### ~~timestamp~~
```ts
timestamp: number;
```
-Defined in: [types.ts:598](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L598)
-
-#### Inherited from
-
-[`BaseStreamChunk`](BaseStreamChunk.md).[`timestamp`](BaseStreamChunk.md#timestamp)
+Defined in: [types.ts:909](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L909)
***
-### type
+### ~~type~~
```ts
type: "thinking";
```
-Defined in: [types.ts:664](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L664)
-
-#### Overrides
-
-[`BaseStreamChunk`](BaseStreamChunk.md).[`type`](BaseStreamChunk.md#type)
+Defined in: [types.ts:906](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L906)
diff --git a/docs/reference/interfaces/ToolCallArgsEvent.md b/docs/reference/interfaces/ToolCallArgsEvent.md
new file mode 100644
index 00000000..8db2650e
--- /dev/null
+++ b/docs/reference/interfaces/ToolCallArgsEvent.md
@@ -0,0 +1,108 @@
+---
+id: ToolCallArgsEvent
+title: ToolCallArgsEvent
+---
+
+# Interface: ToolCallArgsEvent
+
+Defined in: [types.ts:716](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L716)
+
+Emitted while tool call arguments are streaming.
+
+## Extends
+
+- [`BaseEvent`](BaseEvent.md)
+
+## Properties
+
+### args?
+
+```ts
+optional args: string;
+```
+
+Defined in: [types.ts:722](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L722)
+
+Full accumulated arguments so far
+
+***
+
+### delta
+
+```ts
+delta: string;
+```
+
+Defined in: [types.ts:720](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L720)
+
+Incremental JSON arguments delta
+
+***
+
+### model?
+
+```ts
+optional model: string;
+```
+
+Defined in: [types.ts:629](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L629)
+
+TanStack AI addition: Model identifier for multi-model support
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`model`](BaseEvent.md#model)
+
+***
+
+### rawEvent?
+
+```ts
+optional rawEvent: unknown;
+```
+
+Defined in: [types.ts:631](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L631)
+
+Original provider event for debugging/advanced use cases
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`rawEvent`](BaseEvent.md#rawevent)
+
+***
+
+### timestamp
+
+```ts
+timestamp: number;
+```
+
+Defined in: [types.ts:627](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L627)
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`timestamp`](BaseEvent.md#timestamp)
+
+***
+
+### toolCallId
+
+```ts
+toolCallId: string;
+```
+
+Defined in: [types.ts:718](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L718)
+
+***
+
+### type
+
+```ts
+type: "TOOL_CALL_ARGS";
+```
+
+Defined in: [types.ts:717](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L717)
+
+#### Overrides
+
+[`BaseEvent`](BaseEvent.md).[`type`](BaseEvent.md#type)
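+
+## Example
+
+A minimal sketch of buffering argument deltas until the call completes, assuming `stream` is an `AsyncIterable<StreamChunk>`. The buffered JSON is only guaranteed to be complete once `TOOL_CALL_END` arrives:
+
+```ts
+const argBuffers = new Map<string, string>();
+
+for await (const event of stream) {
+  if (event.type === 'TOOL_CALL_ARGS') {
+    // args (when present) is the full accumulated JSON; otherwise append the delta
+    const prev = argBuffers.get(event.toolCallId) ?? '';
+    argBuffers.set(event.toolCallId, event.args ?? prev + event.delta);
+  } else if (event.type === 'TOOL_CALL_END') {
+    const raw = argBuffers.get(event.toolCallId) ?? '{}';
+    console.log(event.toolName, JSON.parse(raw));
+  }
+}
+```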
diff --git a/docs/reference/interfaces/ToolCallEndEvent.md b/docs/reference/interfaces/ToolCallEndEvent.md
new file mode 100644
index 00000000..1b10572b
--- /dev/null
+++ b/docs/reference/interfaces/ToolCallEndEvent.md
@@ -0,0 +1,118 @@
+---
+id: ToolCallEndEvent
+title: ToolCallEndEvent
+---
+
+# Interface: ToolCallEndEvent
+
+Defined in: [types.ts:728](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L728)
+
+Emitted when a tool call completes (with optional result).
+
+## Extends
+
+- [`BaseEvent`](BaseEvent.md)
+
+## Properties
+
+### input?
+
+```ts
+optional input: unknown;
+```
+
+Defined in: [types.ts:733](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L733)
+
+Final parsed input arguments
+
+***
+
+### model?
+
+```ts
+optional model: string;
+```
+
+Defined in: [types.ts:629](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L629)
+
+TanStack AI addition: Model identifier for multi-model support
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`model`](BaseEvent.md#model)
+
+***
+
+### rawEvent?
+
+```ts
+optional rawEvent: unknown;
+```
+
+Defined in: [types.ts:631](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L631)
+
+Original provider event for debugging/advanced use cases
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`rawEvent`](BaseEvent.md#rawevent)
+
+***
+
+### result?
+
+```ts
+optional result: unknown;
+```
+
+Defined in: [types.ts:735](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L735)
+
+Tool execution result (present when tool has executed)
+
+***
+
+### timestamp
+
+```ts
+timestamp: number;
+```
+
+Defined in: [types.ts:627](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L627)
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`timestamp`](BaseEvent.md#timestamp)
+
+***
+
+### toolCallId
+
+```ts
+toolCallId: string;
+```
+
+Defined in: [types.ts:730](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L730)
+
+***
+
+### toolName
+
+```ts
+toolName: string;
+```
+
+Defined in: [types.ts:731](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L731)
+
+***
+
+### type
+
+```ts
+type: "TOOL_CALL_END";
+```
+
+Defined in: [types.ts:729](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L729)
+
+#### Overrides
+
+[`BaseEvent`](BaseEvent.md).[`type`](BaseEvent.md#type)
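+
+## Example
+
+A minimal sketch of distinguishing an already-executed tool from one whose execution happens elsewhere, assuming `stream` is an `AsyncIterable<StreamChunk>`:
+
+```ts
+for await (const event of stream) {
+  if (event.type === 'TOOL_CALL_END') {
+    if (event.result !== undefined) {
+      // The tool has already executed (e.g. server-side)
+      console.log(`${event.toolName} returned`, event.result);
+    } else {
+      // Only the parsed input is available; execution happens elsewhere
+      console.log(`${event.toolName} called with`, event.input);
+    }
+  }
+}
+```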
diff --git a/docs/reference/interfaces/ToolCallStartEvent.md b/docs/reference/interfaces/ToolCallStartEvent.md
new file mode 100644
index 00000000..75f1af0f
--- /dev/null
+++ b/docs/reference/interfaces/ToolCallStartEvent.md
@@ -0,0 +1,130 @@
+---
+id: ToolCallStartEvent
+title: ToolCallStartEvent
+---
+
+# Interface: ToolCallStartEvent
+
+Defined in: [types.ts:700](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L700)
+
+Emitted when a tool call starts.
+
+## Extends
+
+- [`BaseEvent`](BaseEvent.md)
+
+## Properties
+
+### approval?
+
+```ts
+optional approval: object;
+```
+
+Defined in: [types.ts:707](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L707)
+
+Approval metadata if tool requires user approval
+
+#### id
+
+```ts
+id: string;
+```
+
+#### needsApproval
+
+```ts
+needsApproval: true;
+```
+
+***
+
+### index?
+
+```ts
+optional index: number;
+```
+
+Defined in: [types.ts:705](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L705)
+
+Index for parallel tool calls
+
+***
+
+### model?
+
+```ts
+optional model: string;
+```
+
+Defined in: [types.ts:629](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L629)
+
+TanStack AI addition: Model identifier for multi-model support
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`model`](BaseEvent.md#model)
+
+***
+
+### rawEvent?
+
+```ts
+optional rawEvent: unknown;
+```
+
+Defined in: [types.ts:631](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L631)
+
+Original provider event for debugging/advanced use cases
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`rawEvent`](BaseEvent.md#rawevent)
+
+***
+
+### timestamp
+
+```ts
+timestamp: number;
+```
+
+Defined in: [types.ts:627](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L627)
+
+#### Inherited from
+
+[`BaseEvent`](BaseEvent.md).[`timestamp`](BaseEvent.md#timestamp)
+
+***
+
+### toolCallId
+
+```ts
+toolCallId: string;
+```
+
+Defined in: [types.ts:702](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L702)
+
+***
+
+### toolName
+
+```ts
+toolName: string;
+```
+
+Defined in: [types.ts:703](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L703)
+
+***
+
+### type
+
+```ts
+type: "TOOL_CALL_START";
+```
+
+Defined in: [types.ts:701](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L701)
+
+#### Overrides
+
+[`BaseEvent`](BaseEvent.md).[`type`](BaseEvent.md#type)
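+
+## Example
+
+A minimal sketch of gating tools behind user approval, assuming `stream` is an `AsyncIterable<StreamChunk>`; `requestUserApproval` is a hypothetical confirmation helper supplied by the host application:
+
+```ts
+// Hypothetical helper: resolves true when the user approves the call
+declare function requestUserApproval(
+  toolName: string,
+  approvalId: string,
+): Promise<boolean>;
+
+for await (const event of stream) {
+  if (event.type === 'TOOL_CALL_START' && event.approval?.needsApproval) {
+    const ok = await requestUserApproval(event.toolName, event.approval.id);
+    if (!ok) break; // stop consuming the stream if the user declines
+  }
+}
+```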
diff --git a/docs/reference/interfaces/ToolCallStreamChunk.md b/docs/reference/interfaces/ToolCallStreamChunk.md
index 1f5ab343..9583d1e5 100644
--- a/docs/reference/interfaces/ToolCallStreamChunk.md
+++ b/docs/reference/interfaces/ToolCallStreamChunk.md
@@ -3,116 +3,92 @@ id: ToolCallStreamChunk
title: ToolCallStreamChunk
---
-# Interface: ToolCallStreamChunk
+# ~~Interface: ToolCallStreamChunk~~
-Defined in: [types.ts:608](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L608)
+Defined in: [types.ts:877](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L877)
-## Extends
+## Deprecated
-- [`BaseStreamChunk`](BaseStreamChunk.md)
+Use [`ToolCallStartEvent`](ToolCallStartEvent.md) and [`ToolCallArgsEvent`](ToolCallArgsEvent.md) instead.
## Properties
-### id
+### ~~approval?~~
```ts
-id: string;
+optional approval: object;
```
-Defined in: [types.ts:596](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L596)
-
-#### Inherited from
+Defined in: [types.ts:884](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L884)
-[`BaseStreamChunk`](BaseStreamChunk.md).[`id`](BaseStreamChunk.md#id)
+#### ~~id~~
-***
+```ts
+id: string;
+```
-### index
+#### ~~needsApproval~~
```ts
-index: number;
+needsApproval: true;
```
-Defined in: [types.ts:618](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L618)
-
***
-### model
+### ~~id~~
```ts
-model: string;
+id: string;
```
-Defined in: [types.ts:597](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L597)
-
-#### Inherited from
-
-[`BaseStreamChunk`](BaseStreamChunk.md).[`model`](BaseStreamChunk.md#model)
+Defined in: [types.ts:879](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L879)
***
-### timestamp
+### ~~index~~
```ts
-timestamp: number;
+index: number;
```
-Defined in: [types.ts:598](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L598)
-
-#### Inherited from
-
-[`BaseStreamChunk`](BaseStreamChunk.md).[`timestamp`](BaseStreamChunk.md#timestamp)
+Defined in: [types.ts:883](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L883)
***
-### toolCall
+### ~~model~~
```ts
-toolCall: object;
+model: string;
```
-Defined in: [types.ts:610](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L610)
-
-#### function
+Defined in: [types.ts:880](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L880)
-```ts
-function: object;
-```
+***
-##### function.arguments
+### ~~timestamp~~
```ts
-arguments: string;
+timestamp: number;
```
-##### function.name
+Defined in: [types.ts:881](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L881)
-```ts
-name: string;
-```
+***
-#### id
+### ~~toolCall~~
```ts
-id: string;
+toolCall: ToolCall;
```
-#### type
-
-```ts
-type: "function";
-```
+Defined in: [types.ts:882](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L882)
***
-### type
+### ~~type~~
```ts
type: "tool_call";
```
-Defined in: [types.ts:609](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L609)
-
-#### Overrides
-
-[`BaseStreamChunk`](BaseStreamChunk.md).[`type`](BaseStreamChunk.md#type)
+Defined in: [types.ts:878](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L878)
diff --git a/docs/reference/interfaces/ToolInputAvailableStreamChunk.md b/docs/reference/interfaces/ToolInputAvailableStreamChunk.md
index a7256939..a375e231 100644
--- a/docs/reference/interfaces/ToolInputAvailableStreamChunk.md
+++ b/docs/reference/interfaces/ToolInputAvailableStreamChunk.md
@@ -3,96 +3,80 @@ id: ToolInputAvailableStreamChunk
title: ToolInputAvailableStreamChunk
---
-# Interface: ToolInputAvailableStreamChunk
+# ~~Interface: ToolInputAvailableStreamChunk~~
-Defined in: [types.ts:656](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L656)
+Defined in: [types.ts:934](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L934)
-## Extends
+## Deprecated
-- [`BaseStreamChunk`](BaseStreamChunk.md)
+Use `CustomEvent` with `name='tool-input-available'` instead.
## Properties
-### id
+### ~~id~~
```ts
id: string;
```
-Defined in: [types.ts:596](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L596)
-
-#### Inherited from
-
-[`BaseStreamChunk`](BaseStreamChunk.md).[`id`](BaseStreamChunk.md#id)
+Defined in: [types.ts:936](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L936)
***
-### input
+### ~~input~~
```ts
-input: any;
+input: Record<string, unknown>;
```
-Defined in: [types.ts:660](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L660)
+Defined in: [types.ts:941](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L941)
***
-### model
+### ~~model~~
```ts
model: string;
```
-Defined in: [types.ts:597](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L597)
-
-#### Inherited from
-
-[`BaseStreamChunk`](BaseStreamChunk.md).[`model`](BaseStreamChunk.md#model)
+Defined in: [types.ts:937](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L937)
***
-### timestamp
+### ~~timestamp~~
```ts
timestamp: number;
```
-Defined in: [types.ts:598](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L598)
-
-#### Inherited from
-
-[`BaseStreamChunk`](BaseStreamChunk.md).[`timestamp`](BaseStreamChunk.md#timestamp)
+Defined in: [types.ts:938](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L938)
***
-### toolCallId
+### ~~toolCallId~~
```ts
toolCallId: string;
```
-Defined in: [types.ts:658](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L658)
+Defined in: [types.ts:939](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L939)
***
-### toolName
+### ~~toolName~~
```ts
toolName: string;
```
-Defined in: [types.ts:659](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L659)
+Defined in: [types.ts:940](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L940)
***
-### type
+### ~~type~~
```ts
type: "tool-input-available";
```
-Defined in: [types.ts:657](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L657)
-
-#### Overrides
-
-[`BaseStreamChunk`](BaseStreamChunk.md).[`type`](BaseStreamChunk.md#type)
+Defined in: [types.ts:935](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L935)
diff --git a/docs/reference/interfaces/ToolResultStreamChunk.md b/docs/reference/interfaces/ToolResultStreamChunk.md
index e5f728a7..57f32e32 100644
--- a/docs/reference/interfaces/ToolResultStreamChunk.md
+++ b/docs/reference/interfaces/ToolResultStreamChunk.md
@@ -3,86 +3,70 @@ id: ToolResultStreamChunk
title: ToolResultStreamChunk
---
-# Interface: ToolResultStreamChunk
+# ~~Interface: ToolResultStreamChunk~~
-Defined in: [types.ts:621](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L621)
+Defined in: [types.ts:893](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L893)
-## Extends
+## Deprecated
-- [`BaseStreamChunk`](BaseStreamChunk.md)
+Use [`ToolCallEndEvent`](ToolCallEndEvent.md) instead.
## Properties
-### content
+### ~~content~~
```ts
content: string;
```
-Defined in: [types.ts:624](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L624)
+Defined in: [types.ts:899](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L899)
***
-### id
+### ~~id~~
```ts
id: string;
```
-Defined in: [types.ts:596](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L596)
-
-#### Inherited from
-
-[`BaseStreamChunk`](BaseStreamChunk.md).[`id`](BaseStreamChunk.md#id)
+Defined in: [types.ts:895](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L895)
***
-### model
+### ~~model~~
```ts
model: string;
```
-Defined in: [types.ts:597](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L597)
-
-#### Inherited from
-
-[`BaseStreamChunk`](BaseStreamChunk.md).[`model`](BaseStreamChunk.md#model)
+Defined in: [types.ts:896](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L896)
***
-### timestamp
+### ~~timestamp~~
```ts
timestamp: number;
```
-Defined in: [types.ts:598](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L598)
-
-#### Inherited from
-
-[`BaseStreamChunk`](BaseStreamChunk.md).[`timestamp`](BaseStreamChunk.md#timestamp)
+Defined in: [types.ts:897](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L897)
***
-### toolCallId
+### ~~toolCallId~~
```ts
toolCallId: string;
```
-Defined in: [types.ts:623](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L623)
+Defined in: [types.ts:898](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L898)
***
-### type
+### ~~type~~
```ts
type: "tool_result";
```
-Defined in: [types.ts:622](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L622)
-
-#### Overrides
-
-[`BaseStreamChunk`](BaseStreamChunk.md).[`type`](BaseStreamChunk.md#type)
+Defined in: [types.ts:894](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L894)
diff --git a/docs/reference/type-aliases/ChatStreamOptionsForModel.md b/docs/reference/type-aliases/ChatStreamOptionsForModel.md
index 651be480..8801022a 100644
--- a/docs/reference/type-aliases/ChatStreamOptionsForModel.md
+++ b/docs/reference/type-aliases/ChatStreamOptionsForModel.md
@@ -9,7 +9,7 @@ title: ChatStreamOptionsForModel
type ChatStreamOptionsForModel = TAdapter extends AIAdapter ? Omit & object : never;
```
-Defined in: [types.ts:883](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L883)
+Defined in: [types.ts:1143](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1143)
Chat options constrained by a specific model's capabilities.
Unlike ChatStreamOptionsUnion which creates a union over all models,
diff --git a/docs/reference/type-aliases/ChatStreamOptionsUnion.md b/docs/reference/type-aliases/ChatStreamOptionsUnion.md
index 02e3cb26..a94cc681 100644
--- a/docs/reference/type-aliases/ChatStreamOptionsUnion.md
+++ b/docs/reference/type-aliases/ChatStreamOptionsUnion.md
@@ -9,7 +9,7 @@ title: ChatStreamOptionsUnion
type ChatStreamOptionsUnion = TAdapter extends AIAdapter ? Models[number] extends infer TModel ? TModel extends string ? Omit & object : never : never : never;
```
-Defined in: [types.ts:823](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L823)
+Defined in: [types.ts:1083](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1083)
## Type Parameters
diff --git a/docs/reference/type-aliases/EventType.md b/docs/reference/type-aliases/EventType.md
new file mode 100644
index 00000000..ce5bea37
--- /dev/null
+++ b/docs/reference/type-aliases/EventType.md
@@ -0,0 +1,43 @@
+---
+id: EventType
+title: EventType
+---
+
+# Type Alias: EventType
+
+```ts
+type EventType =
+ | "RUN_STARTED"
+ | "RUN_FINISHED"
+ | "RUN_ERROR"
+ | "TEXT_MESSAGE_START"
+ | "TEXT_MESSAGE_CONTENT"
+ | "TEXT_MESSAGE_END"
+ | "TOOL_CALL_START"
+ | "TOOL_CALL_ARGS"
+ | "TOOL_CALL_END"
+ | "STEP_STARTED"
+ | "STEP_FINISHED"
+ | "STATE_SNAPSHOT"
+ | "STATE_DELTA"
+ | "CUSTOM"
+ | "content"
+ | "done"
+ | "error"
+ | "tool_call"
+ | "tool_result"
+ | "thinking"
+ | "approval-requested"
+ | "tool-input-available";
+```
+
+Defined in: [types.ts:595](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L595)
+
+AG-UI Protocol event types.
+Based on the AG-UI specification for agent-user interaction.
+
+## See
+
+https://docs.ag-ui.com/concepts/events
+
+Includes legacy type aliases for backward compatibility during migration.
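+
+## Example
+
+For code that must handle both event families during migration, a small guard can separate AG-UI events from the legacy chunk types. A sketch; the set simply mirrors the union above:
+
+```ts
+const AG_UI_TYPES = new Set<EventType>([
+  'RUN_STARTED', 'RUN_FINISHED', 'RUN_ERROR',
+  'TEXT_MESSAGE_START', 'TEXT_MESSAGE_CONTENT', 'TEXT_MESSAGE_END',
+  'TOOL_CALL_START', 'TOOL_CALL_ARGS', 'TOOL_CALL_END',
+  'STEP_STARTED', 'STEP_FINISHED',
+  'STATE_SNAPSHOT', 'STATE_DELTA', 'CUSTOM',
+]);
+
+// Everything outside the set is a legacy alias such as 'content' or 'done'
+function isAgUiEvent(type: EventType): boolean {
+  return AG_UI_TYPES.has(type);
+}
+```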
diff --git a/docs/reference/type-aliases/ExtractModalitiesForModel.md b/docs/reference/type-aliases/ExtractModalitiesForModel.md
index fe165380..80b484e5 100644
--- a/docs/reference/type-aliases/ExtractModalitiesForModel.md
+++ b/docs/reference/type-aliases/ExtractModalitiesForModel.md
@@ -9,7 +9,7 @@ title: ExtractModalitiesForModel
type ExtractModalitiesForModel = TAdapter extends AIAdapter ? TModel extends keyof ModelInputModalities ? ModelInputModalities[TModel] : ReadonlyArray : ReadonlyArray;
```
-Defined in: [types.ts:942](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L942)
+Defined in: [types.ts:1202](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1202)
Extract the supported input modalities for a specific model from an adapter.
diff --git a/docs/reference/type-aliases/ExtractModelsFromAdapter.md b/docs/reference/type-aliases/ExtractModelsFromAdapter.md
index 7b1edfb0..a8dd3e29 100644
--- a/docs/reference/type-aliases/ExtractModelsFromAdapter.md
+++ b/docs/reference/type-aliases/ExtractModelsFromAdapter.md
@@ -9,7 +9,7 @@ title: ExtractModelsFromAdapter
type ExtractModelsFromAdapter = T extends AIAdapter ? M[number] : never;
```
-Defined in: [types.ts:936](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L936)
+Defined in: [types.ts:1196](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L1196)
## Type Parameters
diff --git a/docs/reference/type-aliases/StreamChunk.md b/docs/reference/type-aliases/StreamChunk.md
index 7227135c..ba01818f 100644
--- a/docs/reference/type-aliases/StreamChunk.md
+++ b/docs/reference/type-aliases/StreamChunk.md
@@ -7,16 +7,32 @@ title: StreamChunk
```ts
type StreamChunk =
+ | RunStartedEvent
+ | RunFinishedEvent
+ | RunErrorEvent
+ | TextMessageStartEvent
+ | TextMessageContentEvent
+ | TextMessageEndEvent
+ | ToolCallStartEvent
+ | ToolCallArgsEvent
+ | ToolCallEndEvent
+ | StepStartedEvent
+ | StepFinishedEvent
+ | StateSnapshotEvent
+ | StateDeltaEvent
+ | CustomEvent
| ContentStreamChunk
- | ToolCallStreamChunk
- | ToolResultStreamChunk
| DoneStreamChunk
| ErrorStreamChunk
+ | ToolCallStreamChunk
+ | ToolResultStreamChunk
+ | ThinkingStreamChunk
| ApprovalRequestedStreamChunk
- | ToolInputAvailableStreamChunk
- | ThinkingStreamChunk;
+ | ToolInputAvailableStreamChunk;
```
-Defined in: [types.ts:672](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L672)
+Defined in: [types.ts:794](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L794)
-Chunk returned by the sdk during streaming chat completions.
+Union type for all AG-UI events.
+This is the primary type for streaming chat completions.
+Includes legacy types for backward compatibility.
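+
+## Example
+
+Because `type` is a discriminant, a plain `switch` narrows each branch to the matching event interface. A minimal sketch using only fields shown in this reference:
+
+```ts
+function describe(chunk: StreamChunk): string {
+  switch (chunk.type) {
+    case 'TEXT_MESSAGE_CONTENT':
+      return `text delta: ${chunk.delta}`; // narrowed to TextMessageContentEvent
+    case 'TOOL_CALL_START':
+      return `tool call: ${chunk.toolName}`; // narrowed to ToolCallStartEvent
+    default:
+      return chunk.type;
+  }
+}
+```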
diff --git a/docs/reference/type-aliases/StreamChunkType.md b/docs/reference/type-aliases/StreamChunkType.md
index 3d83a468..eccefe89 100644
--- a/docs/reference/type-aliases/StreamChunkType.md
+++ b/docs/reference/type-aliases/StreamChunkType.md
@@ -6,15 +6,7 @@ title: StreamChunkType
# Type Alias: StreamChunkType
```ts
-type StreamChunkType =
- | "content"
- | "tool_call"
- | "tool_result"
- | "done"
- | "error"
- | "approval-requested"
- | "tool-input-available"
- | "thinking";
+type StreamChunkType = EventType;
```
-Defined in: [types.ts:584](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L584)
+Defined in: [types.ts:821](https://github.com/TanStack/ai/blob/main/packages/typescript/ai/src/types.ts#L821)
diff --git a/packages/php/tanstack-ai/src/StreamChunkConverter.php b/packages/php/tanstack-ai/src/StreamChunkConverter.php
index 39e1e4e7..2bdf4469 100644
--- a/packages/php/tanstack-ai/src/StreamChunkConverter.php
+++ b/packages/php/tanstack-ai/src/StreamChunkConverter.php
@@ -3,7 +3,7 @@
namespace TanStack\AI;
/**
- * Converts provider-specific streaming events to TanStack AI StreamChunk format.
+ * Converts provider-specific streaming events to TanStack AI AG-UI event format.
*
* Supports:
* - Anthropic streaming events
@@ -18,20 +18,28 @@ class StreamChunkConverter
private array $toolCallsMap = [];
private int $currentToolIndex = -1;
private bool $doneEmitted = false;
+
+ // AG-UI lifecycle tracking
+ private string $runId;
+ private string $messageId;
+ private bool $runStartedEmitted = false;
+ private bool $textMessageStarted = false;
public function __construct(string $model, string $provider = 'anthropic')
{
$this->model = $model;
$this->provider = strtolower($provider);
$this->timestamp = (int)(microtime(true) * 1000);
+ $this->runId = $this->generateId();
+ $this->messageId = $this->generateId();
}
/**
- * Generate a unique ID for the chunk
+ * Generate a unique ID for the event
*/
public function generateId(): string
{
- return 'chatcmpl-' . bin2hex(random_bytes(4));
+ return 'evt-' . bin2hex(random_bytes(4));
}
/**
@@ -60,40 +68,86 @@ private function getAttr(mixed $obj, string $attr, mixed $default = null): mixed
}
/**
- * Convert Anthropic streaming event to StreamChunk format
+ * Safely parse JSON string
+ */
+ private function safeJsonParse(string $json): mixed
+ {
+        try {
+            // JSON_THROW_ON_ERROR makes invalid JSON throw instead of
+            // returning null, so the catch branch is actually reachable
+            return json_decode($json, true, 512, JSON_THROW_ON_ERROR);
+        } catch (\JsonException $e) {
+            // Fall back to the raw string when the payload is not valid JSON
+            return $json;
+        }
+ }
+
+ /**
+ * Convert Anthropic streaming event to AG-UI event format
*/
public function convertAnthropicEvent(mixed $event): array
{
$chunks = [];
$eventType = $this->getEventType($event);
+ // Emit RUN_STARTED on first event
+ if (!$this->runStartedEmitted) {
+ $this->runStartedEmitted = true;
+ $chunks[] = [
+ 'type' => 'RUN_STARTED',
+ 'runId' => $this->runId,
+ 'model' => $this->model,
+ 'timestamp' => $this->timestamp,
+ ];
+ }
+
if ($eventType === 'content_block_start') {
// Tool call is starting
$contentBlock = $this->getAttr($event, 'content_block');
if ($contentBlock && $this->getAttr($contentBlock, 'type') === 'tool_use') {
$this->currentToolIndex++;
+ $toolCallId = $this->getAttr($contentBlock, 'id');
+ $toolName = $this->getAttr($contentBlock, 'name');
$this->toolCallsMap[$this->currentToolIndex] = [
- 'id' => $this->getAttr($contentBlock, 'id'),
- 'name' => $this->getAttr($contentBlock, 'name'),
+ 'id' => $toolCallId,
+ 'name' => $toolName,
'input' => ''
];
+
+ // Emit TOOL_CALL_START
+ $chunks[] = [
+ 'type' => 'TOOL_CALL_START',
+ 'toolCallId' => $toolCallId,
+ 'toolName' => $toolName,
+ 'model' => $this->model,
+ 'timestamp' => $this->timestamp,
+ 'index' => $this->currentToolIndex,
+ ];
}
} elseif ($eventType === 'content_block_delta') {
$delta = $this->getAttr($event, 'delta');
if ($delta && $this->getAttr($delta, 'type') === 'text_delta') {
+ // Emit TEXT_MESSAGE_START on first text
+ if (!$this->textMessageStarted) {
+ $this->textMessageStarted = true;
+ $chunks[] = [
+ 'type' => 'TEXT_MESSAGE_START',
+ 'messageId' => $this->messageId,
+ 'model' => $this->model,
+ 'timestamp' => $this->timestamp,
+ 'role' => 'assistant',
+ ];
+ }
+
// Text content delta
$deltaText = $this->getAttr($delta, 'text', '');
$this->accumulatedContent .= $deltaText;
$chunks[] = [
- 'type' => 'content',
- 'id' => $this->generateId(),
+ 'type' => 'TEXT_MESSAGE_CONTENT',
+ 'messageId' => $this->messageId,
'model' => $this->model,
'timestamp' => $this->timestamp,
'delta' => $deltaText,
'content' => $this->accumulatedContent,
- 'role' => 'assistant'
];
} elseif ($delta && $this->getAttr($delta, 'type') === 'input_json_delta') {
// Tool input is being streamed
@@ -104,23 +158,40 @@ public function convertAnthropicEvent(mixed $event): array
$toolCall['input'] .= $partialJson;
$this->toolCallsMap[$this->currentToolIndex] = $toolCall;
+ // Emit TOOL_CALL_ARGS
$chunks[] = [
- 'type' => 'tool_call',
- 'id' => $this->generateId(),
+ 'type' => 'TOOL_CALL_ARGS',
+ 'toolCallId' => $toolCall['id'],
'model' => $this->model,
'timestamp' => $this->timestamp,
- 'toolCall' => [
- 'id' => $toolCall['id'],
- 'type' => 'function',
- 'function' => [
- 'name' => $toolCall['name'],
- 'arguments' => $partialJson // Incremental JSON
- ]
- ],
- 'index' => $this->currentToolIndex
+ 'delta' => $partialJson,
+ 'args' => $toolCall['input'],
];
}
}
+ } elseif ($eventType === 'content_block_stop') {
+        // Emit TEXT_MESSAGE_END if we had text content
+        if ($this->textMessageStarted && $this->accumulatedContent) {
+            $chunks[] = [
+                'type' => 'TEXT_MESSAGE_END',
+                'messageId' => $this->messageId,
+                'model' => $this->model,
+                'timestamp' => $this->timestamp,
+            ];
+            // Reset so later content_block_stop events don't re-emit the end marker
+            $this->textMessageStarted = false;
+        }
+
+        // Emit TOOL_CALL_END for the tool call that just finished streaming
+        $toolCall = $this->toolCallsMap[$this->currentToolIndex] ?? null;
+        if ($toolCall) {
+            $chunks[] = [
+                'type' => 'TOOL_CALL_END',
+                'toolCallId' => $toolCall['id'],
+                'toolName' => $toolCall['name'],
+                'model' => $this->model,
+                'timestamp' => $this->timestamp,
+                'input' => $this->safeJsonParse($toolCall['input'] ?: '{}'),
+            ];
+            // Drop the entry so a later content_block_stop cannot emit it twice
+            unset($this->toolCallsMap[$this->currentToolIndex]);
+        }
} elseif ($eventType === 'message_delta') {
// Message metadata update (includes stop_reason and usage)
$delta = $this->getAttr($event, 'delta');
@@ -128,7 +199,7 @@ public function convertAnthropicEvent(mixed $event): array
$stopReason = $delta ? $this->getAttr($delta, 'stop_reason') : null;
if ($stopReason) {
- // Map Anthropic stop_reason to TanStack format
+ // Map Anthropic stop_reason to AG-UI format
$finishReason = match ($stopReason) {
'tool_use' => 'tool_calls',
'end_turn' => 'stop',
@@ -146,8 +217,8 @@ public function convertAnthropicEvent(mixed $event): array
$this->doneEmitted = true;
$chunks[] = [
- 'type' => 'done',
- 'id' => $this->generateId(),
+ 'type' => 'RUN_FINISHED',
+ 'runId' => $this->runId,
'model' => $this->model,
'timestamp' => $this->timestamp,
'finishReason' => $finishReason,
@@ -159,8 +230,8 @@ public function convertAnthropicEvent(mixed $event): array
if (!$this->doneEmitted) {
$this->doneEmitted = true;
$chunks[] = [
- 'type' => 'done',
- 'id' => $this->generateId(),
+ 'type' => 'RUN_FINISHED',
+ 'runId' => $this->runId,
'model' => $this->model,
'timestamp' => $this->timestamp,
'finishReason' => 'stop'
@@ -172,12 +243,23 @@ public function convertAnthropicEvent(mixed $event): array
}
/**
- * Convert OpenAI streaming event to StreamChunk format
+ * Convert OpenAI streaming event to AG-UI event format
*/
public function convertOpenAIEvent(mixed $event): array
{
$chunks = [];
+ // Emit RUN_STARTED on first event
+ if (!$this->runStartedEmitted) {
+ $this->runStartedEmitted = true;
+ $chunks[] = [
+ 'type' => 'RUN_STARTED',
+ 'runId' => $this->runId,
+ 'model' => $this->model,
+ 'timestamp' => $this->timestamp,
+ ];
+ }
+
// OpenAI events have chunk.choices[0].delta structure
$choices = $this->getAttr($event, 'choices', []);
$choice = !empty($choices) ? $choices[0] : $event;
@@ -188,15 +270,26 @@ public function convertOpenAIEvent(mixed $event): array
if ($delta) {
$content = $this->getAttr($delta, 'content');
if ($content !== null) {
+ // Emit TEXT_MESSAGE_START on first text
+ if (!$this->textMessageStarted) {
+ $this->textMessageStarted = true;
+ $chunks[] = [
+ 'type' => 'TEXT_MESSAGE_START',
+ 'messageId' => $this->messageId,
+ 'model' => $this->getAttr($event, 'model', $this->model),
+ 'timestamp' => $this->timestamp,
+ 'role' => 'assistant',
+ ];
+ }
+
$this->accumulatedContent .= $content;
$chunks[] = [
- 'type' => 'content',
- 'id' => $this->getAttr($event, 'id', $this->generateId()),
+ 'type' => 'TEXT_MESSAGE_CONTENT',
+ 'messageId' => $this->messageId,
'model' => $this->getAttr($event, 'model', $this->model),
'timestamp' => $this->timestamp,
'delta' => $content,
'content' => $this->accumulatedContent,
- 'role' => 'assistant'
];
}
@@ -205,21 +298,32 @@ public function convertOpenAIEvent(mixed $event): array
if ($toolCalls) {
foreach ($toolCalls as $index => $toolCall) {
$function = $this->getAttr($toolCall, 'function', []);
+ $toolCallId = $this->getAttr($toolCall, 'id', 'call_' . $this->timestamp);
+ $toolName = $this->getAttr($function, 'name', '');
+ $args = $this->getAttr($function, 'arguments', '');
+ $toolIndex = $this->getAttr($toolCall, 'index', $index);
+
+ // Emit TOOL_CALL_START
$chunks[] = [
- 'type' => 'tool_call',
- 'id' => $this->getAttr($event, 'id', $this->generateId()),
+ 'type' => 'TOOL_CALL_START',
+ 'toolCallId' => $toolCallId,
+ 'toolName' => $toolName,
'model' => $this->getAttr($event, 'model', $this->model),
'timestamp' => $this->timestamp,
- 'toolCall' => [
- 'id' => $this->getAttr($toolCall, 'id', 'call_' . $this->timestamp),
- 'type' => 'function',
- 'function' => [
- 'name' => $this->getAttr($function, 'name', ''),
- 'arguments' => $this->getAttr($function, 'arguments', '')
- ]
- ],
- 'index' => $this->getAttr($toolCall, 'index', $index)
+ 'index' => $toolIndex,
];
+
+ // Emit TOOL_CALL_ARGS if there are arguments
+ if ($args) {
+ $chunks[] = [
+ 'type' => 'TOOL_CALL_ARGS',
+ 'toolCallId' => $toolCallId,
+ 'model' => $this->getAttr($event, 'model', $this->model),
+ 'timestamp' => $this->timestamp,
+ 'delta' => $args,
+ 'args' => $args,
+ ];
+ }
}
}
}
@@ -227,6 +331,16 @@ public function convertOpenAIEvent(mixed $event): array
// Handle completion
$finishReason = $this->getAttr($choice, 'finish_reason');
if ($finishReason) {
+ // Emit TEXT_MESSAGE_END if we had text
+ if ($this->textMessageStarted) {
+ $chunks[] = [
+ 'type' => 'TEXT_MESSAGE_END',
+ 'messageId' => $this->messageId,
+ 'model' => $this->getAttr($event, 'model', $this->model),
+ 'timestamp' => $this->timestamp,
+ ];
+ }
+
$usage = $this->getAttr($event, 'usage');
$usageDict = null;
if ($usage) {
@@ -239,8 +353,8 @@ public function convertOpenAIEvent(mixed $event): array
$this->doneEmitted = true;
$chunks[] = [
- 'type' => 'done',
- 'id' => $this->getAttr($event, 'id', $this->generateId()),
+ 'type' => 'RUN_FINISHED',
+ 'runId' => $this->runId,
'model' => $this->getAttr($event, 'model', $this->model),
'timestamp' => $this->timestamp,
'finishReason' => $finishReason,
@@ -279,18 +393,18 @@ public function convertEvent(mixed $event): array
}
/**
- * Convert an error to ErrorStreamChunk format
+ * Convert an error to RUN_ERROR event format
*/
public function convertError(\Throwable $error): array
{
return [
- 'type' => 'error',
- 'id' => $this->generateId(),
+ 'type' => 'RUN_ERROR',
+ 'runId' => $this->runId,
'model' => $this->model,
'timestamp' => $this->timestamp,
'error' => [
'message' => $error->getMessage(),
- 'code' => $error->getCode()
+ 'code' => $error->getCode() ?: null
]
];
}
diff --git a/packages/python/tanstack-ai/src/tanstack_ai/__init__.py b/packages/python/tanstack-ai/src/tanstack_ai/__init__.py
index a55f21e2..cff1a0ba 100644
--- a/packages/python/tanstack-ai/src/tanstack_ai/__init__.py
+++ b/packages/python/tanstack-ai/src/tanstack_ai/__init__.py
@@ -31,7 +31,7 @@
combine_strategies,
)
-# Types
+# Types - AG-UI Events
from .types import (
# Core types
Tool,
@@ -39,16 +39,27 @@
ModelMessage,
ChatOptions,
AIAdapterConfig,
- # Stream chunk types
+ # AG-UI Event types
+ EventType,
+ BaseEvent,
StreamChunk,
- ContentStreamChunk,
- ThinkingStreamChunk,
- ToolCallStreamChunk,
- ToolInputAvailableStreamChunk,
- ApprovalRequestedStreamChunk,
- ToolResultStreamChunk,
- DoneStreamChunk,
- ErrorStreamChunk,
+ UsageInfo,
+ ErrorInfo,
+ RunStartedEvent,
+ RunFinishedEvent,
+ RunErrorEvent,
+ TextMessageStartEvent,
+ TextMessageContentEvent,
+ TextMessageEndEvent,
+ ToolCallStartEvent,
+ ToolCallArgsEvent,
+ ToolCallEndEvent,
+ StepStartedEvent,
+ StepFinishedEvent,
+ StateSnapshotEvent,
+ StateDeltaEvent,
+ CustomEvent,
+ ApprovalInfo,
# Agent loop types
AgentLoopState,
AgentLoopStrategy,
@@ -83,23 +94,37 @@
"max_iterations",
"until_finish_reason",
"combine_strategies",
- # Types
+ # Core types
"Tool",
"ToolCall",
"ModelMessage",
"ChatOptions",
"AIAdapterConfig",
+ # AG-UI Event types
+ "EventType",
+ "BaseEvent",
"StreamChunk",
- "ContentStreamChunk",
- "ThinkingStreamChunk",
- "ToolCallStreamChunk",
- "ToolInputAvailableStreamChunk",
- "ApprovalRequestedStreamChunk",
- "ToolResultStreamChunk",
- "DoneStreamChunk",
- "ErrorStreamChunk",
+ "UsageInfo",
+ "ErrorInfo",
+ "RunStartedEvent",
+ "RunFinishedEvent",
+ "RunErrorEvent",
+ "TextMessageStartEvent",
+ "TextMessageContentEvent",
+ "TextMessageEndEvent",
+ "ToolCallStartEvent",
+ "ToolCallArgsEvent",
+ "ToolCallEndEvent",
+ "StepStartedEvent",
+ "StepFinishedEvent",
+ "StateSnapshotEvent",
+ "StateDeltaEvent",
+ "CustomEvent",
+ "ApprovalInfo",
+ # Agent loop types
"AgentLoopState",
"AgentLoopStrategy",
+ # Other types
"SummarizationOptions",
"SummarizationResult",
"EmbeddingOptions",
@@ -115,4 +140,3 @@
]
__version__ = "0.1.0"
-
diff --git a/packages/python/tanstack-ai/src/tanstack_ai/anthropic_adapter.py b/packages/python/tanstack-ai/src/tanstack_ai/anthropic_adapter.py
index a3ba0cd2..e6dcdf62 100644
--- a/packages/python/tanstack-ai/src/tanstack_ai/anthropic_adapter.py
+++ b/packages/python/tanstack-ai/src/tanstack_ai/anthropic_adapter.py
@@ -28,16 +28,15 @@
from .types import (
AIAdapterConfig,
ChatOptions,
- ContentStreamChunk,
- DoneStreamChunk,
EmbeddingOptions,
EmbeddingResult,
- ErrorStreamChunk,
+ RunErrorEvent,
+ RunFinishedEvent,
StreamChunk,
SummarizationOptions,
SummarizationResult,
- ThinkingStreamChunk,
- ToolCallStreamChunk,
+ TextMessageContentEvent,
+ ToolCallStartEvent,
)
@@ -102,7 +101,7 @@ async def chat_stream(self, options: ChatOptions) -> AsyncIterator[StreamChunk]:
options: Chat options
Yields:
- StreamChunk objects
+ StreamChunk objects (AG-UI events)
"""
try:
# Format messages for Anthropic (function returns tuple of (system, messages))
@@ -145,6 +144,7 @@ async def chat_stream(self, options: ChatOptions) -> AsyncIterator[StreamChunk]:
# Make the streaming request
message_id = self._generate_id()
+ run_id = self._generate_id()
accumulated_content = ""
accumulated_thinking = ""
tool_calls: Dict[int, Dict[str, Any]] = {}
@@ -169,11 +169,8 @@ async def chat_stream(self, options: ChatOptions) -> AsyncIterator[StreamChunk]:
# Tool use block
tool_calls[event.index] = {
"id": block.id,
- "type": "function",
- "function": {
- "name": block.name,
- "arguments": "",
- },
+ "name": block.name,
+ "arguments": "",
}
elif event.type == "content_block_delta":
@@ -182,35 +179,34 @@ async def chat_stream(self, options: ChatOptions) -> AsyncIterator[StreamChunk]:
if delta.type == "text_delta":
# Text content delta
accumulated_content += delta.text
- yield ContentStreamChunk(
- type="content",
- id=message_id,
- model=options.model,
- timestamp=timestamp,
- delta=delta.text,
- content=accumulated_content,
- role="assistant",
- )
+ chunk: TextMessageContentEvent = {
+ "type": "TEXT_MESSAGE_CONTENT",
+ "messageId": message_id,
+ "model": options.model,
+ "timestamp": timestamp,
+ "delta": delta.text,
+ "content": accumulated_content,
+ }
+ yield chunk
elif delta.type == "input_json_delta":
# Tool input delta
if event.index in tool_calls:
- tool_calls[event.index]["function"][
- "arguments"
- ] += delta.partial_json
+ tool_calls[event.index]["arguments"] += delta.partial_json
elif event.type == "content_block_stop":
# Content block completed
if event.index in tool_calls:
# Emit tool call chunk
tool_call = tool_calls[event.index]
- yield ToolCallStreamChunk(
- type="tool_call",
- id=message_id,
- model=options.model,
- timestamp=timestamp,
- toolCall=tool_call,
- index=event.index,
- )
+ tool_chunk: ToolCallStartEvent = {
+ "type": "TOOL_CALL_START",
+ "timestamp": timestamp,
+ "model": options.model,
+ "toolCallId": tool_call["id"],
+ "toolName": tool_call["name"],
+ "index": event.index,
+ }
+ yield tool_chunk
elif event.type == "message_delta":
# Message metadata delta (finish reason, usage)
@@ -238,27 +234,29 @@ async def chat_stream(self, options: ChatOptions) -> AsyncIterator[StreamChunk]:
elif final_message.stop_reason == "tool_use":
finish_reason = "tool_calls"
- yield DoneStreamChunk(
- type="done",
- id=message_id,
- model=options.model,
- timestamp=timestamp,
- finishReason=finish_reason,
- usage=usage,
- )
+ done_chunk: RunFinishedEvent = {
+ "type": "RUN_FINISHED",
+ "runId": run_id,
+ "model": options.model,
+ "timestamp": timestamp,
+ "finishReason": finish_reason,
+ "usage": usage,
+ }
+ yield done_chunk
except Exception as e:
# Emit error chunk
- yield ErrorStreamChunk(
- type="error",
- id=self._generate_id(),
- model=options.model,
- timestamp=int(time.time() * 1000),
- error={
+ error_chunk: RunErrorEvent = {
+ "type": "RUN_ERROR",
+ "runId": self._generate_id(),
+ "model": options.model,
+ "timestamp": int(time.time() * 1000),
+ "error": {
"message": str(e),
"code": getattr(e, "code", None),
},
- )
+ }
+ yield error_chunk
def _format_tools(self, tools: List[Any]) -> List[Dict[str, Any]]:
"""
diff --git a/packages/python/tanstack-ai/src/tanstack_ai/chat.py b/packages/python/tanstack-ai/src/tanstack_ai/chat.py
index 93edd10d..126afd33 100644
--- a/packages/python/tanstack-ai/src/tanstack_ai/chat.py
+++ b/packages/python/tanstack-ai/src/tanstack_ai/chat.py
@@ -22,15 +22,14 @@
)
from .types import (
AgentLoopStrategy,
- ApprovalRequestedStreamChunk,
ChatOptions,
- DoneStreamChunk,
+ CustomEvent,
ModelMessage,
+ RunFinishedEvent,
StreamChunk,
Tool,
ToolCall,
- ToolInputAvailableStreamChunk,
- ToolResultStreamChunk,
+ ToolCallEndEvent,
)
@@ -119,7 +118,7 @@ def __init__(
self.last_finish_reason: Optional[str] = None
self.current_message_id: Optional[str] = None
self.accumulated_content = ""
- self.done_chunk: Optional[DoneStreamChunk] = None
+ self.done_chunk: Optional[RunFinishedEvent] = None
self.should_emit_stream_end = True
self.early_termination = False
self.tool_phase: ToolPhaseResult = ToolPhaseResult.CONTINUE
@@ -128,6 +127,7 @@ def __init__(
# Generate IDs
self.request_id = self._create_id("chat")
self.stream_id = self._create_id("stream")
+ self.run_id = self._create_id("run")
async def chat(self) -> AsyncIterator[StreamChunk]:
"""
@@ -215,17 +215,32 @@ def _handle_stream_chunk(self, chunk: StreamChunk) -> None:
"""
chunk_type = chunk.get("type")
- if chunk_type == "content":
- self.accumulated_content = chunk["content"]
- elif chunk_type == "tool_call":
- self.tool_call_manager.add_tool_call_chunk(chunk)
- elif chunk_type == "done":
+ if chunk_type == "TEXT_MESSAGE_CONTENT":
+ self.accumulated_content = chunk.get("content", "")
+ elif chunk_type == "TOOL_CALL_START":
+ # Build legacy format for tool call manager
+ tool_call_chunk = {
+ "index": chunk.get("index", 0),
+ "toolCall": {
+ "id": chunk.get("toolCallId"),
+ "type": "function",
+ "function": {
+ "name": chunk.get("toolName"),
+ "arguments": "",
+ },
+ },
+ }
+ self.tool_call_manager.add_tool_call_chunk(tool_call_chunk)
+ elif chunk_type == "TOOL_CALL_ARGS":
+ # Nothing to do here: the adapters accumulate argument deltas
+ # themselves before emitting the final tool call events.
+ pass
+ elif chunk_type == "RUN_FINISHED":
self._handle_done_chunk(chunk)
- elif chunk_type == "error":
+ elif chunk_type == "RUN_ERROR":
self.early_termination = True
self.should_emit_stream_end = False
- def _handle_done_chunk(self, chunk: DoneStreamChunk) -> None:
+ def _handle_done_chunk(self, chunk: RunFinishedEvent) -> None:
"""Handle a done chunk."""
# Don't overwrite a tool_calls finishReason with a stop finishReason
if (
@@ -373,21 +388,20 @@ def _collect_client_state(self) -> tuple[Dict[str, bool], Dict[str, Any]]:
async def _emit_approval_requests(
self,
approval_requests: List[ApprovalRequest],
- done_chunk: DoneStreamChunk,
+ done_chunk: RunFinishedEvent,
) -> AsyncIterator[StreamChunk]:
"""Emit approval request chunks."""
for approval in approval_requests:
- chunk: ApprovalRequestedStreamChunk = {
- "type": "approval-requested",
- "id": done_chunk["id"],
- "model": done_chunk["model"],
+ chunk: CustomEvent = {
+ "type": "CUSTOM",
+ "name": "approval-requested",
"timestamp": int(time.time() * 1000),
- "toolCallId": approval.tool_call_id,
- "toolName": approval.tool_name,
- "input": approval.input,
- "approval": {
- "id": approval.approval_id,
- "needsApproval": True,
+ "model": self.options.model,
+ "value": {
+ "toolCallId": approval.tool_call_id,
+ "toolName": approval.tool_name,
+ "input": approval.input,
+ "approvalId": approval.approval_id,
},
}
yield chunk
@@ -395,37 +409,39 @@ async def _emit_approval_requests(
async def _emit_client_tool_inputs(
self,
client_requests: List[ClientToolRequest],
- done_chunk: DoneStreamChunk,
+ done_chunk: RunFinishedEvent,
) -> AsyncIterator[StreamChunk]:
"""Emit tool-input-available chunks for client execution."""
for client_tool in client_requests:
- chunk: ToolInputAvailableStreamChunk = {
- "type": "tool-input-available",
- "id": done_chunk["id"],
- "model": done_chunk["model"],
+ chunk: CustomEvent = {
+ "type": "CUSTOM",
+ "name": "tool-input-available",
"timestamp": int(time.time() * 1000),
- "toolCallId": client_tool.tool_call_id,
- "toolName": client_tool.tool_name,
- "input": client_tool.input,
+ "model": self.options.model,
+ "value": {
+ "toolCallId": client_tool.tool_call_id,
+ "toolName": client_tool.tool_name,
+ "input": client_tool.input,
+ },
}
yield chunk
async def _emit_tool_results(
self,
results: List[ToolResult],
- done_chunk: DoneStreamChunk,
+ done_chunk: RunFinishedEvent,
) -> AsyncIterator[StreamChunk]:
"""Emit tool result chunks and add to messages."""
for result in results:
content = json.dumps(result.result)
- chunk: ToolResultStreamChunk = {
- "type": "tool_result",
- "id": done_chunk["id"],
- "model": done_chunk["model"],
+ chunk: ToolCallEndEvent = {
+ "type": "TOOL_CALL_END",
"timestamp": int(time.time() * 1000),
+ "model": self.options.model,
"toolCallId": result.tool_call_id,
- "content": content,
+ "toolName": "", # We don't have the tool name here
+ "result": content,
}
yield chunk
@@ -454,11 +470,11 @@ def _get_pending_tool_calls_from_messages(self) -> List[ToolCall]:
return pending
- def _create_synthetic_done_chunk(self) -> DoneStreamChunk:
+ def _create_synthetic_done_chunk(self) -> RunFinishedEvent:
"""Create a synthetic done chunk for pending tool calls."""
return {
- "type": "done",
- "id": self._create_id("pending"),
+ "type": "RUN_FINISHED",
+ "runId": self.run_id,
"model": self.options.model,
"timestamp": int(time.time() * 1000),
"finishReason": "tool_calls",
@@ -533,7 +549,7 @@ async def chat(
... messages=[{"role": "user", "content": "Hello!"}],
... tools=[weather_tool],
... ):
- ... if chunk["type"] == "content":
+ ... if chunk["type"] == "TEXT_MESSAGE_CONTENT":
... print(chunk["delta"], end="", flush=True)
"""
chat_options = ChatOptions(
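Taken together, the chat loop above now speaks AG-UI end to end. A minimal sketch of a consumer, assuming only the event shapes defined in this PR (`stream` stands in for whatever `chat(...)` returns):

```python
from typing import AsyncIterator

from tanstack_ai import StreamChunk


async def print_events(stream: AsyncIterator[StreamChunk]) -> None:
    # Dispatch on the AG-UI "type" discriminator; unknown events are ignored.
    async for event in stream:
        kind = event["type"]
        if kind == "TEXT_MESSAGE_CONTENT":
            print(event["delta"], end="", flush=True)
        elif kind == "TOOL_CALL_END":
            print(f"\n[tool {event['toolName']}] {event.get('result', '')}")
        elif kind == "RUN_FINISHED":
            print(f"\n-- finished: {event.get('finishReason')} --")
        elif kind == "RUN_ERROR":
            print(f"\n-- error: {event['error']['message']} --")
```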
diff --git a/packages/python/tanstack-ai/src/tanstack_ai/converter.py b/packages/python/tanstack-ai/src/tanstack_ai/converter.py
index 2ee9e541..21fc504c 100644
--- a/packages/python/tanstack-ai/src/tanstack_ai/converter.py
+++ b/packages/python/tanstack-ai/src/tanstack_ai/converter.py
@@ -2,8 +2,9 @@
TanStack AI Stream Chunk Converter
Converts streaming events from various AI providers (Anthropic, OpenAI)
-into TanStack AI StreamChunk format.
+into TanStack AI AG-UI event format.
"""
+import json
import uuid
from typing import List, Dict, Any, Optional
from datetime import datetime
@@ -11,7 +12,7 @@
class StreamChunkConverter:
"""
- Converts provider-specific streaming events to TanStack AI StreamChunk format.
+ Converts provider-specific streaming events to TanStack AI AG-UI event format.
Supports:
- Anthropic streaming events
@@ -33,10 +34,16 @@ def __init__(self, model: str, provider: str = "anthropic"):
self.tool_calls_map: Dict[int, Dict[str, Any]] = {}
self.current_tool_index = -1
self.done_emitted = False
+
+ # AG-UI lifecycle tracking
+ self.run_id = self.generate_id()
+ self.message_id = self.generate_id()
+ self.run_started_emitted = False
+ self.text_message_started = False
def generate_id(self) -> str:
- """Generate a unique ID for the chunk"""
- return f"chatcmpl-{uuid.uuid4().hex[:8]}"
+ """Generate a unique ID for the event"""
+ return f"evt-{uuid.uuid4().hex[:8]}"
def _get_event_type(self, event: Any) -> str:
"""Get event type from either dict or object"""
@@ -50,38 +57,76 @@ def _get_attr(self, obj: Any, attr: str, default: Any = None) -> Any:
return obj.get(attr, default)
return getattr(obj, attr, default)
+ def _safe_json_parse(self, json_str: str) -> Any:
+ """Safely parse JSON string"""
+ try:
+ return json.loads(json_str)
+ except (json.JSONDecodeError, TypeError):
+ return json_str
+
async def convert_anthropic_event(self, event: Any) -> List[Dict[str, Any]]:
- """Convert Anthropic streaming event to StreamChunk format"""
+ """Convert Anthropic streaming event to AG-UI event format"""
chunks = []
event_type = self._get_event_type(event)
+ # Emit RUN_STARTED on first event
+ if not self.run_started_emitted:
+ self.run_started_emitted = True
+ chunks.append({
+ "type": "RUN_STARTED",
+ "runId": self.run_id,
+ "model": self.model,
+ "timestamp": self.timestamp,
+ })
+
if event_type == "content_block_start":
# Tool call is starting
content_block = self._get_attr(event, "content_block")
if content_block and self._get_attr(content_block, "type") == "tool_use":
self.current_tool_index += 1
+ tool_call_id = self._get_attr(content_block, "id")
+ tool_name = self._get_attr(content_block, "name")
self.tool_calls_map[self.current_tool_index] = {
- "id": self._get_attr(content_block, "id"),
- "name": self._get_attr(content_block, "name"),
+ "id": tool_call_id,
+ "name": tool_name,
"input": ""
}
+ # Emit TOOL_CALL_START
+ chunks.append({
+ "type": "TOOL_CALL_START",
+ "toolCallId": tool_call_id,
+ "toolName": tool_name,
+ "model": self.model,
+ "timestamp": self.timestamp,
+ "index": self.current_tool_index,
+ })
elif event_type == "content_block_delta":
delta = self._get_attr(event, "delta")
if delta and self._get_attr(delta, "type") == "text_delta":
+ # Emit TEXT_MESSAGE_START on first text
+ if not self.text_message_started:
+ self.text_message_started = True
+ chunks.append({
+ "type": "TEXT_MESSAGE_START",
+ "messageId": self.message_id,
+ "model": self.model,
+ "timestamp": self.timestamp,
+ "role": "assistant",
+ })
+
# Text content delta
delta_text = self._get_attr(delta, "text", "")
self.accumulated_content += delta_text
chunks.append({
- "type": "content",
- "id": self.generate_id(),
+ "type": "TEXT_MESSAGE_CONTENT",
+ "messageId": self.message_id,
"model": self.model,
"timestamp": self.timestamp,
"delta": delta_text,
"content": self.accumulated_content,
- "role": "assistant"
})
elif delta and self._get_attr(delta, "type") == "input_json_delta":
@@ -92,22 +137,38 @@ async def convert_anthropic_event(self, event: Any) -> List[Dict[str, Any]]:
if tool_call:
tool_call["input"] += partial_json
+ # Emit TOOL_CALL_ARGS
chunks.append({
- "type": "tool_call",
- "id": self.generate_id(),
+ "type": "TOOL_CALL_ARGS",
+ "toolCallId": tool_call["id"],
"model": self.model,
"timestamp": self.timestamp,
- "toolCall": {
- "id": tool_call["id"],
- "type": "function",
- "function": {
- "name": tool_call["name"],
- "arguments": partial_json # Incremental JSON
- }
- },
- "index": self.current_tool_index
+ "delta": partial_json,
+ "args": tool_call["input"],
})
+ elif event_type == "content_block_stop":
+ # Emit TEXT_MESSAGE_END if we had text content
+ if self.text_message_started and self.accumulated_content:
+ chunks.append({
+ "type": "TEXT_MESSAGE_END",
+ "messageId": self.message_id,
+ "model": self.model,
+ "timestamp": self.timestamp,
+ })
+
+ # Emit TOOL_CALL_END for tool calls
+ tool_call = self.tool_calls_map.get(self.current_tool_index)
+ if tool_call:
+ chunks.append({
+ "type": "TOOL_CALL_END",
+ "toolCallId": tool_call["id"],
+ "toolName": tool_call["name"],
+ "model": self.model,
+ "timestamp": self.timestamp,
+ "input": self._safe_json_parse(tool_call["input"] or "{}"),
+ })
+
elif event_type == "message_delta":
# Message metadata update (includes stop_reason and usage)
delta = self._get_attr(event, "delta")
@@ -115,7 +176,7 @@ async def convert_anthropic_event(self, event: Any) -> List[Dict[str, Any]]:
stop_reason = self._get_attr(delta, "stop_reason") if delta else None
if stop_reason:
- # Map Anthropic stop_reason to TanStack format
+ # Map Anthropic stop_reason to AG-UI format
if stop_reason == "tool_use":
finish_reason = "tool_calls"
elif stop_reason == "end_turn":
@@ -133,8 +194,8 @@ async def convert_anthropic_event(self, event: Any) -> List[Dict[str, Any]]:
self.done_emitted = True
chunks.append({
- "type": "done",
- "id": self.generate_id(),
+ "type": "RUN_FINISHED",
+ "runId": self.run_id,
"model": self.model,
"timestamp": self.timestamp,
"finishReason": finish_reason,
@@ -146,8 +207,8 @@ async def convert_anthropic_event(self, event: Any) -> List[Dict[str, Any]]:
if not self.done_emitted:
self.done_emitted = True
chunks.append({
- "type": "done",
- "id": self.generate_id(),
+ "type": "RUN_FINISHED",
+ "runId": self.run_id,
"model": self.model,
"timestamp": self.timestamp,
"finishReason": "stop"
@@ -156,9 +217,19 @@ async def convert_anthropic_event(self, event: Any) -> List[Dict[str, Any]]:
return chunks
async def convert_openai_event(self, event: Any) -> List[Dict[str, Any]]:
- """Convert OpenAI streaming event to StreamChunk format"""
+ """Convert OpenAI streaming event to AG-UI event format"""
chunks = []
+ # Emit RUN_STARTED on first event
+ if not self.run_started_emitted:
+ self.run_started_emitted = True
+ chunks.append({
+ "type": "RUN_STARTED",
+ "runId": self.run_id,
+ "model": self.model,
+ "timestamp": self.timestamp,
+ })
+
# OpenAI events have chunk.choices[0].delta structure
choice = self._get_attr(event, "choices", [])
if choice and len(choice) > 0:
@@ -173,40 +244,70 @@ async def convert_openai_event(self, event: Any) -> List[Dict[str, Any]]:
if delta:
content = self._get_attr(delta, "content")
if content:
+ # Emit TEXT_MESSAGE_START on first text
+ if not self.text_message_started:
+ self.text_message_started = True
+ chunks.append({
+ "type": "TEXT_MESSAGE_START",
+ "messageId": self.message_id,
+ "model": self._get_attr(event, "model", self.model),
+ "timestamp": self.timestamp,
+ "role": "assistant",
+ })
+
self.accumulated_content += content
chunks.append({
- "type": "content",
- "id": self._get_attr(event, "id", self.generate_id()),
+ "type": "TEXT_MESSAGE_CONTENT",
+ "messageId": self.message_id,
"model": self._get_attr(event, "model", self.model),
"timestamp": self.timestamp,
"delta": content,
"content": self.accumulated_content,
- "role": "assistant"
})
# Handle tool calls
tool_calls = self._get_attr(delta, "tool_calls")
if tool_calls:
for tool_call in tool_calls:
+ tool_call_id = self._get_attr(tool_call, "id", f"call_{self.timestamp}")
+ function = self._get_attr(tool_call, "function", {})
+ tool_name = self._get_attr(function, "name", "")
+ args = self._get_attr(function, "arguments", "")
+ index = self._get_attr(tool_call, "index", 0)
+
+ # Emit TOOL_CALL_START
chunks.append({
- "type": "tool_call",
- "id": self._get_attr(event, "id", self.generate_id()),
+ "type": "TOOL_CALL_START",
+ "toolCallId": tool_call_id,
+ "toolName": tool_name,
"model": self._get_attr(event, "model", self.model),
"timestamp": self.timestamp,
- "toolCall": {
- "id": self._get_attr(tool_call, "id", f"call_{self.timestamp}"),
- "type": "function",
- "function": {
- "name": self._get_attr(self._get_attr(tool_call, "function", {}), "name", ""),
- "arguments": self._get_attr(self._get_attr(tool_call, "function", {}), "arguments", "")
- }
- },
- "index": self._get_attr(tool_call, "index", 0)
+ "index": index,
})
+
+ # Emit TOOL_CALL_ARGS if there are arguments
+ if args:
+ chunks.append({
+ "type": "TOOL_CALL_ARGS",
+ "toolCallId": tool_call_id,
+ "model": self._get_attr(event, "model", self.model),
+ "timestamp": self.timestamp,
+ "delta": args,
+ "args": args,
+ })
# Handle completion
finish_reason = self._get_attr(choice, "finish_reason")
if finish_reason:
+ # Emit TEXT_MESSAGE_END if we had text
+ if self.text_message_started:
+ chunks.append({
+ "type": "TEXT_MESSAGE_END",
+ "messageId": self.message_id,
+ "model": self._get_attr(event, "model", self.model),
+ "timestamp": self.timestamp,
+ })
+
usage = self._get_attr(event, "usage")
usage_dict = None
if usage:
@@ -218,8 +319,8 @@ async def convert_openai_event(self, event: Any) -> List[Dict[str, Any]]:
self.done_emitted = True
chunks.append({
- "type": "done",
- "id": self._get_attr(event, "id", self.generate_id()),
+ "type": "RUN_FINISHED",
+ "runId": self.run_id,
"model": self._get_attr(event, "model", self.model),
"timestamp": self.timestamp,
"finishReason": finish_reason,
@@ -230,7 +331,7 @@ async def convert_openai_event(self, event: Any) -> List[Dict[str, Any]]:
async def convert_event(self, event: Any) -> List[Dict[str, Any]]:
"""
- Convert provider streaming event to StreamChunk format.
+ Convert provider streaming event to AG-UI event format.
Automatically detects provider based on event structure.
"""
if self.provider == "anthropic":
@@ -252,10 +353,10 @@ async def convert_event(self, event: Any) -> List[Dict[str, Any]]:
return await self.convert_anthropic_event(event)
async def convert_error(self, error: Exception) -> Dict[str, Any]:
- """Convert an error to ErrorStreamChunk format"""
+ """Convert an error to RUN_ERROR event format"""
return {
- "type": "error",
- "id": self.generate_id(),
+ "type": "RUN_ERROR",
+ "runId": self.run_id,
"model": self.model,
"timestamp": self.timestamp,
"error": {
diff --git a/packages/python/tanstack-ai/src/tanstack_ai/sse.py b/packages/python/tanstack-ai/src/tanstack_ai/sse.py
index 7d3e30ee..f6626014 100644
--- a/packages/python/tanstack-ai/src/tanstack_ai/sse.py
+++ b/packages/python/tanstack-ai/src/tanstack_ai/sse.py
@@ -33,7 +33,7 @@ def format_sse_done() -> str:
def format_sse_error(error: Exception) -> str:
"""
- Format an error as an SSE error chunk.
+ Format an error as an SSE RUN_ERROR event (AG-UI protocol).
Args:
error: Exception to format
@@ -42,9 +42,9 @@ def format_sse_error(error: Exception) -> str:
SSE-formatted error chunk
"""
error_chunk = {
- "type": "error",
+ "type": "RUN_ERROR",
"error": {
- "type": type(error).__name__,
+ "code": type(error).__name__,
"message": str(error)
}
}
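With the `type` → `code` rename, the error payload now matches `ErrorInfo`. Assuming the usual `data:` SSE framing (the framing itself is outside this hunk), the wire format looks like this sketch:

```python
import json

# Illustrative only: mirrors the error_chunk built above, framed as a
# standard SSE data line. The real framing lives in format_sse_error.
error = ValueError("boom")
payload = {
    "type": "RUN_ERROR",
    "error": {"code": type(error).__name__, "message": str(error)},
}
print(f"data: {json.dumps(payload)}\n")
# data: {"type": "RUN_ERROR", "error": {"code": "ValueError", "message": "boom"}}
```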
diff --git a/packages/python/tanstack-ai/src/tanstack_ai/tool_manager.py b/packages/python/tanstack-ai/src/tanstack_ai/tool_manager.py
index 07d07d5c..e28c1c33 100644
--- a/packages/python/tanstack-ai/src/tanstack_ai/tool_manager.py
+++ b/packages/python/tanstack-ai/src/tanstack_ai/tool_manager.py
@@ -10,11 +10,9 @@
from typing import Any, Dict, List, Optional, Tuple
from .types import (
- DoneStreamChunk,
ModelMessage,
Tool,
ToolCall,
- ToolResultStreamChunk,
)
diff --git a/packages/python/tanstack-ai/src/tanstack_ai/types.py b/packages/python/tanstack-ai/src/tanstack_ai/types.py
index 21ccc701..361c74f5 100644
--- a/packages/python/tanstack-ai/src/tanstack_ai/types.py
+++ b/packages/python/tanstack-ai/src/tanstack_ai/types.py
@@ -1,8 +1,8 @@
"""
Type definitions for TanStack AI Python package.
-This module defines the core types used throughout the package, mirroring the
-TypeScript implementation for consistency across platforms.
+This module defines the core types used throughout the package, following the
+AG-UI (Agent-User Interface) protocol for consistent event streaming.
"""
from dataclasses import dataclass, field
@@ -84,115 +84,185 @@ class Tool:
# ============================================================================
-# Stream Chunk Types
+# AG-UI Protocol Event Types
# ============================================================================
-
-StreamChunkType = Literal[
- "content",
- "thinking",
- "tool_call",
- "tool-input-available",
- "approval-requested",
- "tool_result",
- "done",
- "error",
+EventType = Literal[
+ "RUN_STARTED",
+ "RUN_FINISHED",
+ "RUN_ERROR",
+ "TEXT_MESSAGE_START",
+ "TEXT_MESSAGE_CONTENT",
+ "TEXT_MESSAGE_END",
+ "TOOL_CALL_START",
+ "TOOL_CALL_ARGS",
+ "TOOL_CALL_END",
+ "STEP_STARTED",
+ "STEP_FINISHED",
+ "STATE_SNAPSHOT",
+ "STATE_DELTA",
+ "CUSTOM",
]
-class BaseStreamChunk(TypedDict):
- """Base structure for all stream chunks."""
+class BaseEvent(TypedDict, total=False):
+ """Base structure for all AG-UI events."""
- type: StreamChunkType
- id: str
- model: str
+ type: EventType
timestamp: int # Unix timestamp in milliseconds
+ model: Optional[str] # TanStack AI addition
+ rawEvent: Optional[Any] # Original provider event
+
+
+class UsageInfo(TypedDict, total=False):
+ """Token usage information."""
+
+ promptTokens: int
+ completionTokens: int
+ totalTokens: int
+
+
+class ErrorInfo(TypedDict, total=False):
+ """Error information."""
+
+ message: str
+ code: Optional[str]
+
+
+class RunStartedEvent(BaseEvent):
+ """Emitted when a run starts."""
+
+ runId: str
+ threadId: Optional[str]
-class ContentStreamChunk(BaseStreamChunk):
- """Emitted when the model generates text content."""
+class RunFinishedEvent(BaseEvent):
+ """Emitted when a run completes successfully."""
+
+ runId: str
+ finishReason: Optional[Literal["stop", "length", "content_filter", "tool_calls"]]
+ usage: Optional[UsageInfo]
+
+
+class RunErrorEvent(BaseEvent):
+ """Emitted when an error occurs during a run."""
+
+ runId: Optional[str]
+ error: ErrorInfo
- delta: str # The incremental content token
- content: str # Full accumulated content so far
- role: Optional[Literal["assistant"]]
+class TextMessageStartEvent(BaseEvent):
+ """Emitted when a text message starts."""
-class ThinkingStreamChunk(BaseStreamChunk):
- """Emitted when the model exposes its reasoning process."""
+ messageId: str
+ role: Literal["assistant"]
- delta: Optional[str] # The incremental thinking token
- content: str # Full accumulated thinking content so far
+class TextMessageContentEvent(BaseEvent):
+ """Emitted when text content is generated (streaming tokens)."""
-class ToolCallStreamChunk(BaseStreamChunk):
- """Emitted when the model decides to call a tool/function."""
+ messageId: str
+ delta: str
+ content: Optional[str] # Full accumulated content so far
- toolCall: ToolCall
- index: int # Index of this tool call (for parallel calls)
+class TextMessageEndEvent(BaseEvent):
+ """Emitted when a text message completes."""
-class ToolInputAvailableStreamChunk(BaseStreamChunk):
- """Emitted when tool inputs are complete and ready for client-side execution."""
+ messageId: str
+
+
+class ApprovalInfo(TypedDict, total=False):
+ """Approval metadata for tools requiring user approval."""
+
+ id: str
+ needsApproval: bool
+
+
+class ToolCallStartEvent(BaseEvent):
+ """Emitted when a tool call starts."""
toolCallId: str
toolName: str
- input: Any # Parsed tool arguments
+ index: Optional[int]
+ approval: Optional[ApprovalInfo]
-class ApprovalRequestedStreamChunk(BaseStreamChunk):
- """Emitted when a tool requires user approval before execution."""
+class ToolCallArgsEvent(BaseEvent):
+ """Emitted when tool call arguments are streaming."""
toolCallId: str
- toolName: str
- input: Any
- approval: Dict[str, Any] # Contains 'id' and 'needsApproval'
+ delta: str # Incremental JSON arguments delta
+ args: Optional[str] # Full accumulated arguments
-class ToolResultStreamChunk(BaseStreamChunk):
- """Emitted when a tool execution completes."""
+class ToolCallEndEvent(BaseEvent):
+ """Emitted when a tool call completes (with optional result)."""
toolCallId: str
- content: str # Result of the tool execution (JSON stringified)
+ toolName: str
+ input: Optional[Any] # Final parsed input arguments
+ result: Optional[str] # Tool execution result
-class UsageInfo(TypedDict, total=False):
- """Token usage information."""
+class StepStartedEvent(BaseEvent):
+ """Emitted when a reasoning/thinking step starts."""
- promptTokens: int
- completionTokens: int
- totalTokens: int
+ stepId: str
+ stepType: Literal["thinking", "reasoning", "planning"]
-class DoneStreamChunk(BaseStreamChunk):
- """Emitted when the stream completes successfully."""
+class StepFinishedEvent(BaseEvent):
+ """Emitted when a reasoning/thinking step completes or streams content."""
- finishReason: Optional[Literal["stop", "length", "content_filter", "tool_calls"]]
- usage: Optional[UsageInfo]
+ stepId: str
+ delta: Optional[str] # Incremental thinking token
+ content: str # Full accumulated thinking content
-class ErrorInfo(TypedDict, total=False):
- """Error information."""
+class StateDeltaOp(TypedDict):
+ """A single state delta operation."""
- message: str
- code: Optional[str]
+ op: Literal["add", "remove", "replace"]
+ path: str
+ value: Optional[Any]
-class ErrorStreamChunk(BaseStreamChunk):
- """Emitted when an error occurs during streaming."""
+class StateSnapshotEvent(BaseEvent):
+ """Emitted for full state synchronization."""
- error: ErrorInfo
+ state: Dict[str, Any]
+
+
+class StateDeltaEvent(BaseEvent):
+ """Emitted for incremental state updates."""
+
+ delta: List[StateDeltaOp]
+
+
+class CustomEvent(BaseEvent):
+ """Custom event for extensibility."""
+
+ name: str
+ value: Any
-# Union type for all stream chunks
+# Union type for all AG-UI events
StreamChunk = Union[
- ContentStreamChunk,
- ThinkingStreamChunk,
- ToolCallStreamChunk,
- ToolInputAvailableStreamChunk,
- ApprovalRequestedStreamChunk,
- ToolResultStreamChunk,
- DoneStreamChunk,
- ErrorStreamChunk,
+ RunStartedEvent,
+ RunFinishedEvent,
+ RunErrorEvent,
+ TextMessageStartEvent,
+ TextMessageContentEvent,
+ TextMessageEndEvent,
+ ToolCallStartEvent,
+ ToolCallArgsEvent,
+ ToolCallEndEvent,
+ StepStartedEvent,
+ StepFinishedEvent,
+ StateSnapshotEvent,
+ StateDeltaEvent,
+ CustomEvent,
]
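Because these are `TypedDict`s, events stay plain dicts at runtime and the annotations only drive static checking. A quick sketch (ids and token counts are made up):

```python
from tanstack_ai.types import RunFinishedEvent, TextMessageContentEvent

done: RunFinishedEvent = {
    "type": "RUN_FINISHED",
    "runId": "run-abc123",  # hypothetical id
    "timestamp": 1733212800000,
    "finishReason": "stop",
    "usage": {"promptTokens": 4, "completionTokens": 7, "totalTokens": 11},
}

text: TextMessageContentEvent = {
    "type": "TEXT_MESSAGE_CONTENT",
    "messageId": "msg-1",  # hypothetical id
    "timestamp": 1733212800000,
    "delta": "Hi",
    "content": "Hi",
}
```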
diff --git a/packages/typescript/ai-anthropic/src/anthropic-adapter.ts b/packages/typescript/ai-anthropic/src/anthropic-adapter.ts
index edfcea79..acb7a942 100644
--- a/packages/typescript/ai-anthropic/src/anthropic-adapter.ts
+++ b/packages/typescript/ai-anthropic/src/anthropic-adapter.ts
@@ -109,10 +109,9 @@ export class Anthropic extends BaseAdapter<
stack: error?.stack,
})
- // Emit an error chunk
+ // Emit an error chunk (AG-UI RUN_ERROR)
yield {
- type: 'error',
- id: this.generateId(),
+ type: 'RUN_ERROR',
model: options.model,
timestamp: Date.now(),
error: {
@@ -431,12 +430,31 @@ export class Anthropic extends BaseAdapter<
const timestamp = Date.now()
const toolCallsMap = new Map<
number,
- { id: string; name: string; input: string }
+ { id: string; name: string; input: string; started: boolean }
>()
let currentToolIndex = -1
+ // AG-UI lifecycle tracking
+ const runId = generateId()
+ const messageId = generateId()
+ const stepId = generateId()
+ let hasEmittedRunStarted = false
+ let hasEmittedTextMessageStart = false
+ let hasEmittedStepStarted = false
+
try {
for await (const event of stream) {
+ // Emit RUN_STARTED on first event
+ if (!hasEmittedRunStarted) {
+ hasEmittedRunStarted = true
+ yield {
+ type: 'RUN_STARTED',
+ runId,
+ model,
+ timestamp,
+ }
+ }
+
if (event.type === 'content_block_start') {
if (event.content_block.type === 'tool_use') {
currentToolIndex++
@@ -444,32 +462,55 @@ export class Anthropic extends BaseAdapter<
id: event.content_block.id,
name: event.content_block.name,
input: '',
+ started: false,
})
} else if (event.content_block.type === 'thinking') {
// Reset thinking content when a new thinking block starts
accumulatedThinking = ''
+ // Emit STEP_STARTED for thinking
+ if (!hasEmittedStepStarted) {
+ hasEmittedStepStarted = true
+ yield {
+ type: 'STEP_STARTED',
+ stepId,
+ model,
+ timestamp,
+ stepType: 'thinking',
+ }
+ }
}
} else if (event.type === 'content_block_delta') {
if (event.delta.type === 'text_delta') {
+ // Emit TEXT_MESSAGE_START on first text content
+ if (!hasEmittedTextMessageStart) {
+ hasEmittedTextMessageStart = true
+ yield {
+ type: 'TEXT_MESSAGE_START',
+ messageId,
+ model,
+ timestamp,
+ role: 'assistant',
+ }
+ }
+
const delta = event.delta.text
accumulatedContent += delta
yield {
- type: 'content',
- id: generateId(),
- model: model,
+ type: 'TEXT_MESSAGE_CONTENT',
+ messageId,
+ model,
timestamp,
delta,
content: accumulatedContent,
- role: 'assistant',
}
} else if (event.delta.type === 'thinking_delta') {
// Handle thinking content
const delta = event.delta.thinking
accumulatedThinking += delta
yield {
- type: 'thinking',
- id: generateId(),
- model: model,
+ type: 'STEP_FINISHED',
+ stepId,
+ model,
timestamp,
delta,
content: accumulatedThinking,
@@ -478,55 +519,75 @@ export class Anthropic extends BaseAdapter<
// Tool input is being streamed
const existing = toolCallsMap.get(currentToolIndex)
if (existing) {
+ // Emit TOOL_CALL_START on first args
+ if (!existing.started) {
+ existing.started = true
+ yield {
+ type: 'TOOL_CALL_START',
+ toolCallId: existing.id,
+ toolName: existing.name,
+ model,
+ timestamp,
+ index: currentToolIndex,
+ }
+ }
+
// Accumulate the input for final processing
existing.input += event.delta.partial_json
- // Yield the DELTA (partial_json), not the full accumulated input
- // The stream processor will concatenate these deltas
+ // Yield the DELTA (partial_json) as TOOL_CALL_ARGS
yield {
- type: 'tool_call',
- id: generateId(),
- model: model,
+ type: 'TOOL_CALL_ARGS',
+ toolCallId: existing.id,
+ model,
timestamp,
- toolCall: {
- id: existing.id,
- type: 'function',
- function: {
- name: existing.name,
- arguments: event.delta.partial_json,
- },
- },
- index: currentToolIndex,
+ delta: event.delta.partial_json,
+ args: existing.input,
}
}
}
} else if (event.type === 'content_block_stop') {
- // If this is a tool call and we haven't received any input deltas,
- // emit a tool_call chunk with empty arguments
+ // If this is a tool call, emit TOOL_CALL_END
const existing = toolCallsMap.get(currentToolIndex)
- if (existing && existing.input === '') {
- // No input_json_delta events received, emit empty arguments
+ if (existing) {
+ // If we never started (no input deltas), emit start first
+ if (!existing.started) {
+ existing.started = true
+ yield {
+ type: 'TOOL_CALL_START',
+ toolCallId: existing.id,
+ toolName: existing.name,
+ model,
+ timestamp,
+ index: currentToolIndex,
+ }
+ }
+
+ // Emit TOOL_CALL_END with final input
yield {
- type: 'tool_call',
- id: generateId(),
- model: model,
+ type: 'TOOL_CALL_END',
+ toolCallId: existing.id,
+ toolName: existing.name,
+ model,
+ timestamp,
+ input: this.safeJsonParse(existing.input || '{}'),
+ }
+ }
+
+ // Emit TEXT_MESSAGE_END if we had text content
+ if (hasEmittedTextMessageStart && accumulatedContent) {
+ yield {
+ type: 'TEXT_MESSAGE_END',
+ messageId,
+ model,
timestamp,
- toolCall: {
- id: existing.id,
- type: 'function',
- function: {
- name: existing.name,
- arguments: '{}',
- },
- },
- index: currentToolIndex,
}
}
} else if (event.type === 'message_stop') {
yield {
- type: 'done',
- id: generateId(),
- model: model,
+ type: 'RUN_FINISHED',
+ runId,
+ model,
timestamp,
finishReason: 'stop',
}
@@ -535,12 +596,11 @@ export class Anthropic extends BaseAdapter<
switch (event.delta.stop_reason) {
case 'tool_use': {
yield {
- type: 'done',
- id: generateId(),
- model: model,
+ type: 'RUN_FINISHED',
+ runId,
+ model,
timestamp,
finishReason: 'tool_calls',
-
usage: {
promptTokens: event.usage.input_tokens || 0,
completionTokens: event.usage.output_tokens || 0,
@@ -553,9 +613,9 @@ export class Anthropic extends BaseAdapter<
}
case 'max_tokens': {
yield {
- type: 'error',
- id: generateId(),
- model: model,
+ type: 'RUN_ERROR',
+ runId,
+ model,
timestamp,
error: {
message:
@@ -567,9 +627,9 @@ export class Anthropic extends BaseAdapter<
}
case 'model_context_window_exceeded': {
yield {
- type: 'error',
- id: generateId(),
- model: model,
+ type: 'RUN_ERROR',
+ runId,
+ model,
timestamp,
error: {
message:
@@ -581,9 +641,9 @@ export class Anthropic extends BaseAdapter<
}
case 'refusal': {
yield {
- type: 'error',
- id: generateId(),
- model: model,
+ type: 'RUN_ERROR',
+ runId,
+ model,
timestamp,
error: {
message: 'The model refused to complete the request.',
@@ -594,9 +654,9 @@ export class Anthropic extends BaseAdapter<
}
default: {
yield {
- type: 'done',
- id: generateId(),
- model: model,
+ type: 'RUN_FINISHED',
+ runId,
+ model,
timestamp,
finishReason: 'stop',
usage: {
@@ -624,9 +684,9 @@ export class Anthropic extends BaseAdapter<
})
yield {
- type: 'error',
- id: generateId(),
- model: model,
+ type: 'RUN_ERROR',
+ runId,
+ model,
timestamp,
error: {
message: error?.message || 'Unknown error occurred',
@@ -635,6 +695,14 @@ export class Anthropic extends BaseAdapter<
}
}
}
+
+ private safeJsonParse(jsonString: string): any {
+ try {
+ return JSON.parse(jsonString)
+ } catch {
+ return jsonString
+ }
+ }
}
/**
* Creates an Anthropic adapter with simplified configuration
diff --git a/packages/typescript/ai-devtools/src/store/ai-context.tsx b/packages/typescript/ai-devtools/src/store/ai-context.tsx
index 42af6917..25e73005 100644
--- a/packages/typescript/ai-devtools/src/store/ai-context.tsx
+++ b/packages/typescript/ai-devtools/src/store/ai-context.tsx
@@ -978,7 +978,10 @@ export const AIProvider: ParentComponent = (props) => {
type: 'tool_result',
messageId: e.payload.messageId,
toolCallId: e.payload.toolCallId,
- content: e.payload.result,
+ content:
+ typeof e.payload.result === 'string'
+ ? e.payload.result
+ : JSON.stringify(e.payload.result),
timestamp: e.payload.timestamp,
chunkCount: 1,
}
diff --git a/packages/typescript/ai-gemini/src/gemini-adapter.ts b/packages/typescript/ai-gemini/src/gemini-adapter.ts
index 8a711b10..e6d9815c 100644
--- a/packages/typescript/ai-gemini/src/gemini-adapter.ts
+++ b/packages/typescript/ai-gemini/src/gemini-adapter.ts
@@ -80,8 +80,7 @@ export class GeminiAdapter extends BaseAdapter<
} catch (error) {
const timestamp = Date.now()
yield {
- type: 'error',
- id: this.generateId(),
+ type: 'RUN_ERROR',
model: options.model,
timestamp,
error: {
@@ -219,11 +218,29 @@ export class GeminiAdapter extends BaseAdapter<
let accumulatedContent = ''
const toolCallMap = new Map<
string,
- { name: string; args: string; index: number }
+ { name: string; args: string; index: number; started: boolean }
>()
let nextToolIndex = 0
+
+ // AG-UI lifecycle tracking
+ const runId = this.generateId()
+ const messageId = this.generateId()
+ let hasEmittedRunStarted = false
+ let hasEmittedTextMessageStart = false
+
// Iterate over the stream result (it's already an AsyncGenerator)
for await (const chunk of result) {
+ // Emit RUN_STARTED on first chunk
+ if (!hasEmittedRunStarted) {
+ hasEmittedRunStarted = true
+ yield {
+ type: 'RUN_STARTED',
+ runId,
+ model,
+ timestamp,
+ }
+ }
+
// Extract content from candidates[0].content.parts
// Parts can contain text or functionCall
if (chunk.candidates?.[0]?.content?.parts) {
@@ -232,20 +249,30 @@ export class GeminiAdapter extends BaseAdapter<
for (const part of parts) {
// Handle text content
if (part.text) {
+ // Emit TEXT_MESSAGE_START on first text
+ if (!hasEmittedTextMessageStart) {
+ hasEmittedTextMessageStart = true
+ yield {
+ type: 'TEXT_MESSAGE_START',
+ messageId,
+ model,
+ timestamp,
+ role: 'assistant',
+ }
+ }
+
accumulatedContent += part.text
yield {
- type: 'content',
- id: this.generateId(),
+ type: 'TEXT_MESSAGE_CONTENT',
+ messageId,
model,
timestamp,
delta: part.text,
content: accumulatedContent,
- role: 'assistant',
}
}
// Handle function calls (tool calls)
- // Check both camelCase (SDK) and snake_case (direct API) formats
const functionCall = part.functionCall
if (functionCall) {
const toolCallId =
@@ -262,11 +289,22 @@ export class GeminiAdapter extends BaseAdapter<
? functionArgs
: JSON.stringify(functionArgs),
index: nextToolIndex++,
+ started: false,
}
toolCallMap.set(toolCallId, toolCallData)
+
+ // Emit TOOL_CALL_START
+ yield {
+ type: 'TOOL_CALL_START',
+ toolCallId,
+ toolName: toolCallData.name,
+ model,
+ timestamp,
+ index: toolCallData.index,
+ }
+ toolCallData.started = true
} else {
// Merge arguments if streaming
-
try {
const existingArgs = JSON.parse(toolCallData.args)
const newArgs =
@@ -276,7 +314,6 @@ export class GeminiAdapter extends BaseAdapter<
const mergedArgs = { ...existingArgs, ...newArgs }
toolCallData.args = JSON.stringify(mergedArgs)
} catch {
- // If parsing fails, use new args
toolCallData.args =
typeof functionArgs === 'string'
? functionArgs
@@ -284,34 +321,38 @@ export class GeminiAdapter extends BaseAdapter<
}
}
+ // Emit TOOL_CALL_ARGS with the arguments
yield {
- type: 'tool_call',
- id: this.generateId(),
+ type: 'TOOL_CALL_ARGS',
+ toolCallId,
model,
timestamp,
- toolCall: {
- id: toolCallId,
- type: 'function',
- function: {
- name: toolCallData.name,
- arguments: toolCallData.args,
- },
- },
- index: toolCallData.index,
+ delta: toolCallData.args,
+ args: toolCallData.args,
}
}
}
} else if (chunk.data) {
// Fallback to chunk.data if available
+ if (!hasEmittedTextMessageStart) {
+ hasEmittedTextMessageStart = true
+ yield {
+ type: 'TEXT_MESSAGE_START',
+ messageId,
+ model,
+ timestamp,
+ role: 'assistant',
+ }
+ }
+
accumulatedContent += chunk.data
yield {
- type: 'content',
- id: this.generateId(),
+ type: 'TEXT_MESSAGE_CONTENT',
+ messageId,
model,
timestamp,
delta: chunk.data,
content: accumulatedContent,
- role: 'assistant',
}
}
@@ -319,16 +360,34 @@ export class GeminiAdapter extends BaseAdapter<
if (chunk.candidates?.[0]?.finishReason) {
const finishReason = chunk.candidates[0].finishReason
+ // Emit TEXT_MESSAGE_END if we had text content
+ if (hasEmittedTextMessageStart) {
+ yield {
+ type: 'TEXT_MESSAGE_END',
+ messageId,
+ model,
+ timestamp,
+ }
+ }
+
+ // Emit TOOL_CALL_END for all tool calls
+ for (const [toolCallId, toolData] of toolCallMap) {
+ yield {
+ type: 'TOOL_CALL_END',
+ toolCallId,
+ toolName: toolData.name,
+ model,
+ timestamp,
+ input: this.safeJsonParse(toolData.args),
+ }
+ }
+
// UNEXPECTED_TOOL_CALL means Gemini tried to call a function but it wasn't properly declared
- // This typically means there's an issue with the tool declaration format
- // We should map it to tool_calls to try to process it anyway
if (finishReason === FinishReason.UNEXPECTED_TOOL_CALL) {
- // Try to extract function call from content.parts if available
if (chunk.candidates[0].content?.parts) {
for (const part of chunk.candidates[0].content.parts) {
const functionCall = part.functionCall
if (functionCall) {
- // We found a function call - process it
const toolCallId =
functionCall.name || `call_${Date.now()}_${nextToolIndex}`
const functionArgs = functionCall.args || {}
@@ -340,34 +399,37 @@ export class GeminiAdapter extends BaseAdapter<
? functionArgs
: JSON.stringify(functionArgs),
index: nextToolIndex++,
+ started: true,
})
yield {
- type: 'tool_call',
- id: this.generateId(),
+ type: 'TOOL_CALL_START',
+ toolCallId,
+ toolName: functionCall.name || '',
model,
timestamp,
- toolCall: {
- id: toolCallId,
- type: 'function',
- function: {
- name: functionCall.name || '',
- arguments:
- typeof functionArgs === 'string'
- ? functionArgs
- : JSON.stringify(functionArgs),
- },
- },
index: nextToolIndex - 1,
}
+
+ yield {
+ type: 'TOOL_CALL_END',
+ toolCallId,
+ toolName: functionCall.name || '',
+ model,
+ timestamp,
+ input:
+ typeof functionArgs === 'string'
+ ? this.safeJsonParse(functionArgs)
+ : functionArgs,
+ }
}
}
}
}
if (finishReason === FinishReason.MAX_TOKENS) {
yield {
- type: 'error',
- id: this.generateId(),
+ type: 'RUN_ERROR',
+ runId,
model,
timestamp,
error: {
@@ -378,8 +440,8 @@ export class GeminiAdapter extends BaseAdapter<
}
yield {
- type: 'done',
- id: this.generateId(),
+ type: 'RUN_FINISHED',
+ runId,
model,
timestamp,
finishReason: toolCallMap.size > 0 ? 'tool_calls' : 'stop',
@@ -395,6 +457,14 @@ export class GeminiAdapter extends BaseAdapter<
}
}
+ private safeJsonParse(jsonString: string): any {
+ try {
+ return JSON.parse(jsonString)
+ } catch {
+ return jsonString
+ }
+ }
+
private convertContentPartToGemini(part: ContentPart): Part {
switch (part.type) {
case 'text':
diff --git a/packages/typescript/ai-gemini/tests/gemini-adapter.test.ts b/packages/typescript/ai-gemini/tests/gemini-adapter.test.ts
index 6c7a08aa..8ce9d052 100644
--- a/packages/typescript/ai-gemini/tests/gemini-adapter.test.ts
+++ b/packages/typescript/ai-gemini/tests/gemini-adapter.test.ts
@@ -326,18 +326,27 @@ describe('GeminiAdapter through AI', () => {
expect(mocks.generateContentStreamSpy).toHaveBeenCalledTimes(1)
const [streamPayload] = mocks.generateContentStreamSpy.mock.calls[0]
expect(streamPayload.config?.topK).toBe(3)
+ // First chunk is RUN_STARTED
expect(received[0]).toMatchObject({
- type: 'content',
- delta: 'Partly ',
- content: 'Partly ',
+ type: 'RUN_STARTED',
})
+ // Then TEXT_MESSAGE_START before content
expect(received[1]).toMatchObject({
- type: 'content',
+ type: 'TEXT_MESSAGE_START',
+ role: 'assistant',
+ })
+ // Then TEXT_MESSAGE_CONTENT chunks
+ expect(received[2]).toMatchObject({
+ type: 'TEXT_MESSAGE_CONTENT',
+ delta: 'Partly ',
+ })
+ expect(received[3]).toMatchObject({
+ type: 'TEXT_MESSAGE_CONTENT',
delta: 'cloudy',
- content: 'Partly cloudy',
})
+ // Last chunk is RUN_FINISHED
expect(received.at(-1)).toMatchObject({
- type: 'done',
+ type: 'RUN_FINISHED',
finishReason: 'stop',
usage: {
promptTokens: 4,
diff --git a/packages/typescript/ai-ollama/src/ollama-adapter.ts b/packages/typescript/ai-ollama/src/ollama-adapter.ts
index fc6080c0..2842730b 100644
--- a/packages/typescript/ai-ollama/src/ollama-adapter.ts
+++ b/packages/typescript/ai-ollama/src/ollama-adapter.ts
@@ -271,44 +271,75 @@ export class Ollama extends BaseAdapter<
): AsyncIterable {
let accumulatedContent = ''
const timestamp = Date.now()
- const responseId: string = this.generateId()
+ const runId: string = this.generateId()
+ const messageId: string = this.generateId()
+ const stepId: string = this.generateId()
let accumulatedReasoning = ''
let hasEmittedToolCalls = false
+ let hasEmittedRunStarted = false
+ let hasEmittedTextMessageStart = false
+ let hasEmittedStepStarted = false
+
for await (const chunk of stream) {
- function handleToolCall(toolCall: ToolCall): StreamChunk {
- // we cast because the library types are missing id and index
- const actualToolCall = toolCall as ToolCall & {
- id: string
- function: { index: number }
- }
- return {
- type: 'tool_call',
- id: responseId,
+ // Emit RUN_STARTED on first chunk
+ if (!hasEmittedRunStarted) {
+ hasEmittedRunStarted = true
+ yield {
+ type: 'RUN_STARTED',
+ runId,
model: chunk.model,
timestamp,
- toolCall: {
- type: 'function',
- id: actualToolCall.id,
- function: {
- name: actualToolCall.function.name || '',
- arguments:
- typeof actualToolCall.function.arguments === 'string'
- ? actualToolCall.function.arguments
- : JSON.stringify(actualToolCall.function.arguments),
- },
- },
- index: actualToolCall.function.index,
}
}
+
if (chunk.done) {
+ // Emit TEXT_MESSAGE_END if we had text
+ if (hasEmittedTextMessageStart) {
+ yield {
+ type: 'TEXT_MESSAGE_END',
+ messageId,
+ model: chunk.model,
+ timestamp,
+ }
+ }
+
if (chunk.message.tool_calls && chunk.message.tool_calls.length > 0) {
- for (const toolCall of chunk.message.tool_calls) {
- yield handleToolCall(toolCall)
+ for (let i = 0; i < chunk.message.tool_calls.length; i++) {
+ const toolCall = chunk.message.tool_calls[i]
+ const actualToolCall = toolCall as ToolCall & {
+ id: string
+ function: { index: number }
+ }
+ const toolCallId = actualToolCall.id || `call_${timestamp}_${i}`
+ const toolName = actualToolCall.function.name || ''
+ const args =
+ typeof actualToolCall.function.arguments === 'string'
+ ? actualToolCall.function.arguments
+ : JSON.stringify(actualToolCall.function.arguments)
+
+ // Emit TOOL_CALL_START and TOOL_CALL_END together
+ yield {
+ type: 'TOOL_CALL_START',
+ toolCallId,
+ toolName,
+ model: chunk.model,
+ timestamp,
+ index: actualToolCall.function.index,
+ }
+
+ yield {
+ type: 'TOOL_CALL_END',
+ toolCallId,
+ toolName,
+ model: chunk.model,
+ timestamp,
+ input: this.safeJsonParse(args),
+ }
hasEmittedToolCalls = true
}
yield {
- type: 'done',
- id: responseId || this.generateId(),
+ type: 'RUN_FINISHED',
+ runId,
model: chunk.model,
timestamp,
finishReason: 'tool_calls',
@@ -316,38 +347,91 @@ export class Ollama extends BaseAdapter<
continue
}
yield {
- type: 'done',
- id: responseId || this.generateId(),
+ type: 'RUN_FINISHED',
+ runId,
model: chunk.model,
timestamp,
finishReason: hasEmittedToolCalls ? 'tool_calls' : 'stop',
}
continue
}
+
if (chunk.message.content) {
+ // Emit TEXT_MESSAGE_START on first content
+ if (!hasEmittedTextMessageStart) {
+ hasEmittedTextMessageStart = true
+ yield {
+ type: 'TEXT_MESSAGE_START',
+ messageId,
+ model: chunk.model,
+ timestamp,
+ role: 'assistant',
+ }
+ }
+
accumulatedContent += chunk.message.content
yield {
- type: 'content',
- id: responseId || this.generateId(),
+ type: 'TEXT_MESSAGE_CONTENT',
+ messageId,
model: chunk.model,
timestamp,
delta: chunk.message.content,
content: accumulatedContent,
- role: 'assistant',
}
}
if (chunk.message.tool_calls && chunk.message.tool_calls.length > 0) {
- for (const toolCall of chunk.message.tool_calls) {
- yield handleToolCall(toolCall)
+ for (let i = 0; i < chunk.message.tool_calls.length; i++) {
+ const toolCall = chunk.message.tool_calls[i]
+ const actualToolCall = toolCall as ToolCall & {
+ id: string
+ function: { index: number }
+ }
+ const toolCallId = actualToolCall.id || `call_${timestamp}_${i}`
+ const toolName = actualToolCall.function.name || ''
+ const args =
+ typeof actualToolCall.function.arguments === 'string'
+ ? actualToolCall.function.arguments
+ : JSON.stringify(actualToolCall.function.arguments)
+
+ yield {
+ type: 'TOOL_CALL_START',
+ toolCallId,
+ toolName,
+ model: chunk.model,
+ timestamp,
+ index: actualToolCall.function.index,
+ }
+
+ yield {
+ type: 'TOOL_CALL_ARGS',
+ toolCallId,
+ model: chunk.model,
+ timestamp,
+ delta: args,
+ args,
+ }
hasEmittedToolCalls = true
}
}
+
if (chunk.message.thinking) {
+ // Emit STEP_STARTED on first thinking
+ if (!hasEmittedStepStarted) {
+ hasEmittedStepStarted = true
+ yield {
+ type: 'STEP_STARTED',
+ stepId,
+ model: chunk.model,
+ timestamp,
+ stepType: 'thinking',
+ }
+ }
+
accumulatedReasoning += chunk.message.thinking
yield {
- type: 'thinking',
- id: responseId || this.generateId(),
+ type: 'STEP_FINISHED',
+ stepId,
model: chunk.model,
timestamp,
content: accumulatedReasoning,
@@ -357,6 +441,14 @@ export class Ollama extends BaseAdapter<
}
}
+ private safeJsonParse(jsonString: string): any {
+ try {
+ return JSON.parse(jsonString)
+ } catch {
+ return jsonString
+ }
+ }
+
/**
* Converts standard Tool format to Ollama-specific tool format
* Ollama uses OpenAI-compatible tool format
diff --git a/packages/typescript/ai-openai/src/openai-adapter.ts b/packages/typescript/ai-openai/src/openai-adapter.ts
index 676d5257..4423ea3d 100644
--- a/packages/typescript/ai-openai/src/openai-adapter.ts
+++ b/packages/typescript/ai-openai/src/openai-adapter.ts
@@ -210,10 +210,14 @@ export class OpenAI extends BaseAdapter<
// Track if we've been streaming deltas to avoid duplicating content from done events
let hasStreamedContentDeltas = false
let hasStreamedReasoningDeltas = false
+ let hasEmittedTextMessageStart = false
+ let hasEmittedStepStarted = false
// Preserve response metadata across events
let responseId: string | null = null
let model: string = options.model
+ let messageId: string | null = null
+ let stepId: string | null = null
const eventTypeCounts = new Map()
@@ -229,21 +233,20 @@ export class OpenAI extends BaseAdapter<
if (contentPart.type === 'output_text') {
accumulatedContent += contentPart.text
return {
- type: 'content',
- id: responseId || generateId(),
+ type: 'TEXT_MESSAGE_CONTENT',
+ messageId: messageId || generateId(),
model: model || options.model,
timestamp,
delta: contentPart.text,
content: accumulatedContent,
- role: 'assistant',
}
}
if (contentPart.type === 'reasoning_text') {
accumulatedReasoning += contentPart.text
return {
- type: 'thinking',
- id: responseId || generateId(),
+ type: 'STEP_FINISHED',
+ stepId: stepId || generateId(),
model: model || options.model,
timestamp,
delta: contentPart.text,
@@ -251,8 +254,8 @@ export class OpenAI extends BaseAdapter<
}
}
return {
- type: 'error',
- id: responseId || generateId(),
+ type: 'RUN_ERROR',
+ runId: responseId || undefined,
model: model || options.model,
timestamp,
error: {
@@ -268,15 +271,30 @@ export class OpenAI extends BaseAdapter<
) {
responseId = chunk.response.id
model = chunk.response.model
+ messageId = generateId()
+ stepId = generateId()
// Reset streaming flags for new response
hasStreamedContentDeltas = false
hasStreamedReasoningDeltas = false
+ hasEmittedTextMessageStart = false
+ hasEmittedStepStarted = false
accumulatedContent = ''
accumulatedReasoning = ''
+
+ // Emit RUN_STARTED event
+ if (chunk.type === 'response.created') {
+ yield {
+ type: 'RUN_STARTED',
+ runId: chunk.response.id,
+ model: chunk.response.model,
+ timestamp,
+ }
+ }
+
if (chunk.response.error) {
yield {
- type: 'error',
- id: chunk.response.id,
+ type: 'RUN_ERROR',
+ runId: chunk.response.id,
model: chunk.response.model,
timestamp,
error: chunk.response.error,
@@ -284,8 +302,8 @@ export class OpenAI extends BaseAdapter<
}
if (chunk.response.incomplete_details) {
yield {
- type: 'error',
- id: chunk.response.id,
+ type: 'RUN_ERROR',
+ runId: chunk.response.id,
model: chunk.response.model,
timestamp,
error: {
@@ -305,16 +323,27 @@ export class OpenAI extends BaseAdapter<
: ''
if (textDelta) {
+ // Emit TEXT_MESSAGE_START on first content
+ if (!hasEmittedTextMessageStart) {
+ hasEmittedTextMessageStart = true
+ yield {
+ type: 'TEXT_MESSAGE_START',
+ messageId: messageId || generateId(),
+ model: model || options.model,
+ timestamp,
+ role: 'assistant',
+ }
+ }
+
accumulatedContent += textDelta
hasStreamedContentDeltas = true
yield {
- type: 'content',
- id: responseId || generateId(),
+ type: 'TEXT_MESSAGE_CONTENT',
+ messageId: messageId || generateId(),
model: model || options.model,
timestamp,
delta: textDelta,
content: accumulatedContent,
- role: 'assistant',
}
}
}
@@ -330,11 +359,23 @@ export class OpenAI extends BaseAdapter<
: ''
if (reasoningDelta) {
+ // Emit STEP_STARTED on first reasoning content
+ if (!hasEmittedStepStarted) {
+ hasEmittedStepStarted = true
+ yield {
+ type: 'STEP_STARTED',
+ stepId: stepId || generateId(),
+ model: model || options.model,
+ timestamp,
+ stepType: 'thinking',
+ }
+ }
+
accumulatedReasoning += reasoningDelta
hasStreamedReasoningDeltas = true
yield {
- type: 'thinking',
- id: responseId || generateId(),
+ type: 'STEP_FINISHED',
+ stepId: stepId || generateId(),
model: model || options.model,
timestamp,
delta: reasoningDelta,
@@ -378,7 +419,13 @@ export class OpenAI extends BaseAdapter<
// Skip emitting chunks for content parts that we've already streamed via deltas
// The done event is just a completion marker, not new content
if (contentPart.type === 'output_text' && hasStreamedContentDeltas) {
- // Content already accumulated from deltas, skip
+ // Emit TEXT_MESSAGE_END
+ yield {
+ type: 'TEXT_MESSAGE_END',
+ messageId: messageId || generateId(),
+ model: model || options.model,
+ timestamp,
+ }
continue
}
if (
@@ -403,31 +450,35 @@ export class OpenAI extends BaseAdapter<
index: chunk.output_index,
name: item.name || '',
})
+
+ // Emit TOOL_CALL_START when we first see the function call
+ yield {
+ type: 'TOOL_CALL_START',
+ toolCallId: item.id,
+ toolName: item.name || '',
+ model: model || options.model,
+ timestamp,
+ index: chunk.output_index,
+ }
}
}
}
if (chunk.type === 'response.function_call_arguments.done') {
- const { item_id, output_index } = chunk
+ const { item_id } = chunk
// Get the function name from metadata (captured in output_item.added)
const metadata = toolCallMetadata.get(item_id)
const name = metadata?.name || ''
+ // Emit TOOL_CALL_END with the complete arguments
yield {
- type: 'tool_call',
- id: responseId || generateId(),
+ type: 'TOOL_CALL_END',
+ toolCallId: item_id,
+ toolName: name,
model: model || options.model,
timestamp,
- index: output_index,
- toolCall: {
- id: item_id,
- type: 'function',
- function: {
- name,
- arguments: chunk.arguments,
- },
- },
+ input: this.safeJsonParse(chunk.arguments),
}
}
@@ -439,8 +490,8 @@ export class OpenAI extends BaseAdapter<
)
yield {
- type: 'done',
- id: responseId || generateId(),
+ type: 'RUN_FINISHED',
+ runId: responseId || generateId(),
model: model || options.model,
timestamp,
usage: {
@@ -454,8 +505,8 @@ export class OpenAI extends BaseAdapter<
if (chunk.type === 'error') {
yield {
- type: 'error',
- id: responseId || generateId(),
+ type: 'RUN_ERROR',
+ runId: responseId || undefined,
model: model || options.model,
timestamp,
error: {
@@ -475,8 +526,7 @@ export class OpenAI extends BaseAdapter<
},
)
yield {
- type: 'error',
- id: generateId(),
+ type: 'RUN_ERROR',
model: options.model,
timestamp,
error: {
@@ -487,6 +537,14 @@ export class OpenAI extends BaseAdapter<
}
}
+ private safeJsonParse(jsonString: string): any {
+ try {
+ return JSON.parse(jsonString)
+ } catch {
+ return jsonString
+ }
+ }
+
/**
* Maps common options to OpenAI-specific format
* Handles translation of normalized options to OpenAI's API format
diff --git a/packages/typescript/ai/package.json b/packages/typescript/ai/package.json
index c395b371..5948d10c 100644
--- a/packages/typescript/ai/package.json
+++ b/packages/typescript/ai/package.json
@@ -52,14 +52,14 @@
],
"dependencies": {
"@tanstack/devtools-event-client": "^0.4.0",
- "partial-json": "^0.1.7"
+ "partial-json": "^0.1.7",
+ "zod": "^4.1.13"
},
"peerDependencies": {
"@alcyone-labs/zod-to-json-schema": "^4.0.0",
- "zod": "^3.0.0 || ^4.0.0"
+ "zod": "^4.0.0"
},
"devDependencies": {
- "@vitest/coverage-v8": "4.0.14",
- "zod": "^4.1.13"
+ "@vitest/coverage-v8": "4.0.14"
}
}
diff --git a/packages/typescript/ai/src/core/chat.ts b/packages/typescript/ai/src/core/chat.ts
index aebaf65a..eb376274 100644
--- a/packages/typescript/ai/src/core/chat.ts
+++ b/packages/typescript/ai/src/core/chat.ts
@@ -11,8 +11,8 @@ import type {
AgentLoopStrategy,
ChatOptions,
ChatStreamOptionsForModel,
- DoneStreamChunk,
ModelMessage,
+ RunFinishedEvent,
StreamChunk,
Tool,
ToolCall,
@@ -53,7 +53,7 @@ class ChatEngine<
private totalChunkCount = 0
private currentMessageId: string | null = null
private accumulatedContent = ''
- private doneChunk: DoneStreamChunk | null = null
+ private doneChunk: RunFinishedEvent | null = null
private shouldEmitStreamEnd = true
private earlyTermination = false
private toolPhase: ToolPhaseResult = 'continue'
@@ -215,68 +215,120 @@ class ChatEngine<
private handleStreamChunk(chunk: StreamChunk): void {
switch (chunk.type) {
- case 'content':
- this.handleContentChunk(chunk)
+ // AG-UI Event Types
+ case 'TEXT_MESSAGE_CONTENT':
+ this.handleTextMessageContentEvent(chunk)
break
- case 'tool_call':
- this.handleToolCallChunk(chunk)
+ case 'TOOL_CALL_START':
+ this.handleToolCallStartEvent(chunk)
break
- case 'tool_result':
- this.handleToolResultChunk(chunk)
+ case 'TOOL_CALL_ARGS':
+ this.handleToolCallArgsEvent(chunk)
+ break
+ case 'TOOL_CALL_END':
+ this.handleToolCallEndEvent(chunk)
+ break
+ case 'RUN_FINISHED':
+ this.handleRunFinishedEvent(chunk)
+ break
+ case 'RUN_ERROR':
+ this.handleRunErrorEvent(chunk)
+ break
+ case 'STEP_FINISHED':
+ this.handleStepFinishedEvent(chunk)
+ break
+
+ // Legacy Event Types (backward compatibility)
+ case 'content':
+ this.handleLegacyContentChunk(chunk)
break
case 'done':
- this.handleDoneChunk(chunk)
+ this.handleLegacyDoneChunk(chunk)
break
case 'error':
- this.handleErrorChunk(chunk)
+ this.handleLegacyErrorChunk(chunk)
break
- case 'thinking':
- this.handleThinkingChunk(chunk)
+ case 'tool_call':
+ this.handleLegacyToolCallChunk(chunk)
+ break
+ case 'tool_result':
+ this.handleLegacyToolResultChunk(chunk)
break
+
default:
+ // RUN_STARTED, TEXT_MESSAGE_START, TEXT_MESSAGE_END, STEP_STARTED,
+ // STATE_SNAPSHOT, STATE_DELTA, CUSTOM, thinking, approval-requested,
+ // tool-input-available - no special handling needed
break
}
}
- private handleContentChunk(chunk: Extract<StreamChunk, { type: 'content' }>) {
- this.accumulatedContent = chunk.content
+ private handleTextMessageContentEvent(
+ chunk: Extract<StreamChunk, { type: 'TEXT_MESSAGE_CONTENT' }>,
+ ) {
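+ // Prefer the server-accumulated content when present; otherwise build it up from deltas.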
+ if (chunk.content) {
+ this.accumulatedContent = chunk.content
+ } else {
+ this.accumulatedContent += chunk.delta
+ }
aiEventClient.emit('stream:chunk:content', {
streamId: this.streamId,
messageId: this.currentMessageId || undefined,
- content: chunk.content,
+ content: this.accumulatedContent,
delta: chunk.delta,
timestamp: Date.now(),
})
}
- private handleToolCallChunk(
- chunk: Extract<StreamChunk, { type: 'tool_call' }>,
+ private handleToolCallStartEvent(
+ chunk: Extract<StreamChunk, { type: 'TOOL_CALL_START' }>,
): void {
- this.toolCallManager.addToolCallChunk(chunk)
+ this.toolCallManager.addToolCallStartEvent(chunk)
aiEventClient.emit('stream:chunk:tool-call', {
streamId: this.streamId,
messageId: this.currentMessageId || undefined,
- toolCallId: chunk.toolCall.id,
- toolName: chunk.toolCall.function.name,
- index: chunk.index,
- arguments: chunk.toolCall.function.arguments,
+ toolCallId: chunk.toolCallId,
+ toolName: chunk.toolName,
+ index: chunk.index ?? 0,
+ arguments: '',
timestamp: Date.now(),
})
}
- private handleToolResultChunk(
- chunk: Extract<StreamChunk, { type: 'tool_result' }>,
+ private handleToolCallArgsEvent(
+ chunk: Extract<StreamChunk, { type: 'TOOL_CALL_ARGS' }>,
): void {
- aiEventClient.emit('stream:chunk:tool-result', {
+ this.toolCallManager.addToolCallArgsEvent(chunk)
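+ // TOOL_CALL_ARGS carries only toolCallId and delta; toolName and index are resolved downstream by id.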
+ aiEventClient.emit('stream:chunk:tool-call', {
streamId: this.streamId,
messageId: this.currentMessageId || undefined,
toolCallId: chunk.toolCallId,
- result: chunk.content,
+ toolName: '',
+ index: 0,
+ arguments: chunk.delta,
timestamp: Date.now(),
})
}
- private handleDoneChunk(chunk: DoneStreamChunk): void {
+ private handleToolCallEndEvent(
+ chunk: Extract<StreamChunk, { type: 'TOOL_CALL_END' }>,
+ ): void {
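+ // Finalize the call with its parsed input; a defined result means the tool has already executed.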
+ this.toolCallManager.completeToolCall(chunk.toolCallId, chunk.input)
+ if (chunk.result !== undefined) {
+ aiEventClient.emit('stream:chunk:tool-result', {
+ streamId: this.streamId,
+ messageId: this.currentMessageId || undefined,
+ toolCallId: chunk.toolCallId,
+ result:
+ typeof chunk.result === 'string'
+ ? chunk.result
+ : JSON.stringify(chunk.result),
+ timestamp: Date.now(),
+ })
+ }
+ }
+
+ private handleRunFinishedEvent(chunk: RunFinishedEvent): void {
// Don't overwrite a tool_calls finishReason with a stop finishReason
// This can happen when adapters send multiple done chunks
if (
@@ -328,8 +380,8 @@ class ChatEngine<
}
}
- private handleErrorChunk(
- chunk: Extract<StreamChunk, { type: 'error' }>,
+ private handleRunErrorEvent(
+ chunk: Extract<StreamChunk, { type: 'RUN_ERROR' }>,
): void {
aiEventClient.emit('stream:chunk:error', {
streamId: this.streamId,
@@ -341,8 +393,8 @@ class ChatEngine<
this.shouldEmitStreamEnd = false
}
- private handleThinkingChunk(
- chunk: Extract<StreamChunk, { type: 'thinking' }>,
+ private handleStepFinishedEvent(
+ chunk: Extract<StreamChunk, { type: 'STEP_FINISHED' }>,
): void {
aiEventClient.emit('stream:chunk:thinking', {
streamId: this.streamId,
@@ -353,6 +405,88 @@ class ChatEngine<
})
}
+ // ============================================
+ // Legacy Event Handlers (Backward Compatibility)
+ // ============================================
+
+ private handleLegacyContentChunk(
+ chunk: Extract<StreamChunk, { type: 'content' }>,
+ ): void {
+ if (chunk.content) {
+ this.accumulatedContent = chunk.content
+ } else {
+ this.accumulatedContent += chunk.delta
+ }
+ aiEventClient.emit('stream:chunk:content', {
+ streamId: this.streamId,
+ messageId: this.currentMessageId || undefined,
+ content: this.accumulatedContent,
+ delta: chunk.delta,
+ timestamp: Date.now(),
+ })
+ }
+
+ private handleLegacyDoneChunk(
+ chunk: Extract<StreamChunk, { type: 'done' }>,
+ ): void {
+ // Create a RUN_FINISHED-like chunk for compatibility
+ const runFinishedChunk: RunFinishedEvent = {
+ type: 'RUN_FINISHED',
+ runId: chunk.id,
+ model: chunk.model,
+ timestamp: chunk.timestamp,
+ finishReason: chunk.finishReason ?? 'stop',
+ usage: chunk.usage,
+ }
+ this.handleRunFinishedEvent(runFinishedChunk)
+ }
+
+ private handleLegacyErrorChunk(
+ chunk: Extract<StreamChunk, { type: 'error' }>,
+ ): void {
+ const errorMessage =
+ typeof chunk.error === 'string' ? chunk.error : chunk.error.message
+ aiEventClient.emit('stream:chunk:error', {
+ streamId: this.streamId,
+ messageId: this.currentMessageId || undefined,
+ error: errorMessage,
+ timestamp: Date.now(),
+ })
+ this.earlyTermination = true
+ this.shouldEmitStreamEnd = false
+ }
+
+ private handleLegacyToolCallChunk(
+ chunk: Extract<StreamChunk, { type: 'tool_call' }>,
+ ): void {
+ const toolCall = chunk.toolCall
+ this.toolCallManager.addToolCallChunk({
+ toolCall,
+ index: chunk.index,
+ })
+ aiEventClient.emit('stream:chunk:tool-call', {
+ streamId: this.streamId,
+ messageId: this.currentMessageId || undefined,
+ toolCallId: toolCall.id,
+ toolName: toolCall.function.name,
+ index: chunk.index,
+ arguments: toolCall.function.arguments,
+ timestamp: Date.now(),
+ })
+ }
+
+ private handleLegacyToolResultChunk(
+ chunk: Extract<StreamChunk, { type: 'tool_result' }>,
+ ): void {
+ aiEventClient.emit('stream:chunk:tool-result', {
+ streamId: this.streamId,
+ messageId: this.currentMessageId || undefined,
+ toolCallId: chunk.toolCallId,
+ result: chunk.content,
+ timestamp: Date.now(),
+ })
+ }
+
private async *checkForPendingToolCalls(): AsyncGenerator<
StreamChunk,
ToolPhaseResult,
@@ -542,7 +676,7 @@ class ChatEngine<
private emitApprovalRequests(
approvals: Array<{ toolCallId: string; toolName: string; input: unknown; approvalId: string }>,
- doneChunk: DoneStreamChunk,
+ doneChunk: RunFinishedEvent,
): Array<StreamChunk> {
const chunks: Array<StreamChunk> = []
@@ -557,17 +691,20 @@ class ChatEngine<
timestamp: Date.now(),
})
+ // Emit CUSTOM event for approval requests
chunks.push({
- type: 'approval-requested',
- id: doneChunk.id,
- model: doneChunk.model,
+ type: 'CUSTOM',
timestamp: Date.now(),
- toolCallId: approval.toolCallId,
- toolName: approval.toolName,
- input: approval.input,
- approval: {
- id: approval.approvalId,
- needsApproval: true,
+ model: doneChunk.model ?? '',
+ name: 'approval-requested',
+ value: {
+ toolCallId: approval.toolCallId,
+ toolName: approval.toolName,
+ input: approval.input,
+ approval: {
+ id: approval.approvalId,
+ needsApproval: true,
+ },
},
})
}
@@ -577,7 +714,7 @@ class ChatEngine<
private emitClientToolInputs(
clientRequests: Array<{ toolCallId: string; toolName: string; input: unknown }>,
- doneChunk: DoneStreamChunk,
+ doneChunk: RunFinishedEvent,
): Array<StreamChunk> {
const chunks: Array<StreamChunk> = []
@@ -591,14 +728,17 @@ class ChatEngine<
timestamp: Date.now(),
})
+ // Emit CUSTOM event for client tool inputs
chunks.push({
- type: 'tool-input-available',
- id: doneChunk.id,
- model: doneChunk.model,
+ type: 'CUSTOM',
timestamp: Date.now(),
- toolCallId: clientTool.toolCallId,
- toolName: clientTool.toolName,
- input: clientTool.input,
+ model: doneChunk.model ?? '',
+ name: 'tool-input-available',
+ value: {
+ toolCallId: clientTool.toolCallId,
+ toolName: clientTool.toolName,
+ input: clientTool.input,
+ },
})
}
@@ -607,7 +747,7 @@ class ChatEngine<
private emitToolResults(
results: Array<{ toolCallId: string; toolName: string; result: unknown }>,
- doneChunk: DoneStreamChunk,
+ doneChunk: RunFinishedEvent,
): Array<StreamChunk> {
const chunks: Array<StreamChunk> = []
@@ -624,16 +764,15 @@ class ChatEngine<
})
const content = JSON.stringify(result.result)
- const chunk: Extract<StreamChunk, { type: 'tool_result' }> = {
- type: 'tool_result',
- id: doneChunk.id,
- model: doneChunk.model,
+ // Emit TOOL_CALL_END event with result
+ chunks.push({
+ type: 'TOOL_CALL_END',
timestamp: Date.now(),
+ model: doneChunk.model ?? '',
toolCallId: result.toolCallId,
- content,
- }
-
- chunks.push(chunk)
+ toolName: result.toolName,
+ result: result.result,
+ })
this.messages = [
...this.messages,
@@ -670,10 +809,10 @@ class ChatEngine<
return pending
}
- private createSyntheticDoneChunk(): DoneStreamChunk {
+ private createSyntheticDoneChunk(): RunFinishedEvent {
return {
- type: 'done',
- id: this.createId('pending'),
+ type: 'RUN_FINISHED',
+ runId: this.createId('pending'),
model: this.params.model,
timestamp: Date.now(),
finishReason: 'tool_calls',
diff --git a/packages/typescript/ai/src/stream/processor.ts b/packages/typescript/ai/src/stream/processor.ts
index d8441a33..ded6f66b 100644
--- a/packages/typescript/ai/src/stream/processor.ts
+++ b/packages/typescript/ai/src/stream/processor.ts
@@ -426,36 +426,101 @@ export class StreamProcessor {
}
switch (chunk.type) {
- case 'content':
- this.handleContentChunk(chunk)
+ // Run lifecycle events
+ case 'RUN_STARTED':
+ // Run started - could be used for initialization
break
- case 'tool_call':
- this.handleToolCallChunk(chunk)
+ case 'RUN_FINISHED':
+ this.handleRunFinishedEvent(chunk)
break
- case 'tool_result':
- this.handleToolResultChunk(chunk)
+ case 'RUN_ERROR':
+ this.handleRunErrorEvent(chunk)
+ break
+
+ // Text message events
+ case 'TEXT_MESSAGE_START':
+ // Message starting - could track message ID
+ break
+
+ case 'TEXT_MESSAGE_CONTENT':
+ this.handleTextMessageContentEvent(chunk)
+ break
+
+ case 'TEXT_MESSAGE_END':
+ // Message ended - finalize text if needed
+ break
+
+ // Tool call events
+ case 'TOOL_CALL_START':
+ this.handleToolCallStartEvent(chunk)
+ break
+
+ case 'TOOL_CALL_ARGS':
+ this.handleToolCallArgsEvent(chunk)
+ break
+
+ case 'TOOL_CALL_END':
+ this.handleToolCallEndEvent(chunk)
+ break
+
+ // Step/thinking events
+ case 'STEP_STARTED':
+ // Step started - could track step ID
+ break
+
+ case 'STEP_FINISHED':
+ this.handleStepFinishedEvent(chunk)
+ break
+
+ // State events
+ case 'STATE_SNAPSHOT':
+ // Full state sync - custom handling
+ break
+
+ case 'STATE_DELTA':
+ // Incremental state update - custom handling
+ break
+
+ // Custom events (including approval flows)
+ case 'CUSTOM':
+ this.handleCustomEvent(chunk)
+ break
+
+ // ============================================
+ // Legacy event types (backward compatibility)
+ // ============================================
+ case 'content':
+ this.handleLegacyContentChunk(chunk)
break
case 'done':
- this.handleDoneChunk(chunk)
+ this.handleLegacyDoneChunk(chunk)
break
case 'error':
- this.handleErrorChunk(chunk)
+ this.handleLegacyErrorChunk(chunk)
+ break
+
+ case 'tool_call':
+ this.handleLegacyToolCallChunk(chunk)
+ break
+
+ case 'tool_result':
+ this.handleLegacyToolResultChunk(chunk)
break
case 'thinking':
- this.handleThinkingChunk(chunk)
+ this.handleLegacyThinkingChunk(chunk)
break
case 'approval-requested':
- this.handleApprovalRequestedChunk(chunk)
+ this.handleLegacyApprovalRequestedChunk(chunk)
break
case 'tool-input-available':
- this.handleToolInputAvailableChunk(chunk)
+ this.handleLegacyToolInputAvailableChunk(chunk)
break
default:
@@ -465,10 +530,10 @@ export class StreamProcessor {
}
/**
- * Handle a content chunk
+ * Handle TEXT_MESSAGE_CONTENT event (AG-UI)
*/
- private handleContentChunk(
- chunk: Extract<StreamChunk, { type: 'content' }>,
+ private handleTextMessageContentEvent(
+ chunk: Extract<StreamChunk, { type: 'TEXT_MESSAGE_CONTENT' }>,
): void {
// Content arriving means all current tool calls are complete
this.completeAllToolCalls()
@@ -498,7 +563,7 @@ export class StreamProcessor {
// Prefer delta over content - delta is the incremental change
if (chunk.delta !== '') {
nextText = currentText + chunk.delta
- } else if (chunk.content !== '') {
+ } else if (chunk.content && chunk.content !== '') {
// Fallback: use content if delta is not provided
if (chunk.content.startsWith(currentText)) {
nextText = chunk.content
@@ -514,9 +579,8 @@ export class StreamProcessor {
this.currentSegmentText = nextText
this.totalTextContent += textDelta
- // Use delta for chunk strategy if available
- // eslint-disable-next-line @typescript-eslint/no-unnecessary-condition
- const chunkPortion = chunk.delta ?? chunk.content ?? ''
+ // Use delta for chunk strategy
+ const chunkPortion = chunk.delta
const shouldEmit = this.chunkStrategy.shouldEmit(
chunkPortion,
this.currentSegmentText,
@@ -527,37 +591,28 @@ export class StreamProcessor {
}
/**
- * Handle a tool call chunk
+ * Handle TOOL_CALL_START event (AG-UI)
*/
- private handleToolCallChunk(
- chunk: Extract<StreamChunk, { type: 'tool_call' }>,
+ private handleToolCallStartEvent(
+ chunk: Extract<StreamChunk, { type: 'TOOL_CALL_START' }>,
): void {
// Mark that we've seen tool calls since the last text segment
this.hasToolCallsSinceTextStart = true
- const toolCallId = chunk.toolCall.id
+ const toolCallId = chunk.toolCallId
const existingToolCall = this.toolCalls.get(toolCallId)
if (!existingToolCall) {
// New tool call starting
- const initialState: ToolCallState = chunk.toolCall.function.arguments
- ? 'input-streaming'
- : 'awaiting-input'
+ const initialState: ToolCallState = 'awaiting-input'
const newToolCall: InternalToolCallState = {
- id: chunk.toolCall.id,
- name: chunk.toolCall.function.name,
- arguments: chunk.toolCall.function.arguments || '',
+ id: toolCallId,
+ name: chunk.toolName,
+ arguments: '',
state: initialState,
parsedArguments: undefined,
- index: chunk.index,
- }
-
- // Try to parse the arguments
- if (chunk.toolCall.function.arguments) {
- newToolCall.parsedArguments = this.jsonParser.parse(
- chunk.toolCall.function.arguments,
- )
+ index: chunk.index ?? this.toolCallOrder.length,
}
this.toolCalls.set(toolCallId, newToolCall)
@@ -567,60 +622,70 @@ export class StreamProcessor {
const actualIndex = this.toolCallOrder.indexOf(toolCallId)
// Emit legacy lifecycle event
- this.handlers.onToolCallStart?.(
- actualIndex,
- chunk.toolCall.id,
- chunk.toolCall.function.name,
- )
+ this.handlers.onToolCallStart?.(actualIndex, toolCallId, chunk.toolName)
// Emit legacy state change event
this.handlers.onToolCallStateChange?.(
actualIndex,
- chunk.toolCall.id,
- chunk.toolCall.function.name,
+ toolCallId,
+ chunk.toolName,
initialState,
- chunk.toolCall.function.arguments || '',
- newToolCall.parsedArguments,
+ '',
+ undefined,
)
- // Emit initial delta
- if (chunk.toolCall.function.arguments) {
- this.handlers.onToolCallDelta?.(
- actualIndex,
- chunk.toolCall.function.arguments,
- )
- }
-
// Update UIMessage
if (this.currentAssistantMessageId) {
this.messages = updateToolCallPart(
this.messages,
this.currentAssistantMessageId,
{
- id: chunk.toolCall.id,
- name: chunk.toolCall.function.name,
- arguments: chunk.toolCall.function.arguments || '',
+ id: toolCallId,
+ name: chunk.toolName,
+ arguments: '',
state: initialState,
},
)
+
+ // If there's approval metadata, update it
+ if (chunk.approval) {
+ this.messages = updateToolCallApproval(
+ this.messages,
+ this.currentAssistantMessageId,
+ toolCallId,
+ chunk.approval.id,
+ )
+ }
this.emitMessagesChange()
// Emit new granular event
this.events.onToolCallStateChange?.(
this.currentAssistantMessageId,
- chunk.toolCall.id,
+ toolCallId,
initialState,
- chunk.toolCall.function.arguments || '',
+ '',
)
}
- } else {
- // Continuing existing tool call
+ }
+ }
+
+ /**
+ * Handle TOOL_CALL_ARGS event (AG-UI)
+ */
+ private handleToolCallArgsEvent(
+ chunk: Extract<StreamChunk, { type: 'TOOL_CALL_ARGS' }>,
+ ): void {
+ const toolCallId = chunk.toolCallId
+ const existingToolCall = this.toolCalls.get(toolCallId)
+
+ if (existingToolCall) {
const wasAwaitingInput = existingToolCall.state === 'awaiting-input'
- existingToolCall.arguments += chunk.toolCall.function.arguments || ''
+ // Append delta to arguments
+ existingToolCall.arguments += chunk.delta || ''
// Update state
- if (wasAwaitingInput && chunk.toolCall.function.arguments) {
+ if (wasAwaitingInput && chunk.delta) {
existingToolCall.state = 'input-streaming'
}
@@ -643,11 +708,8 @@ export class StreamProcessor {
)
// Emit delta
- if (chunk.toolCall.function.arguments) {
- this.handlers.onToolCallDelta?.(
- actualIndex,
- chunk.toolCall.function.arguments,
- )
+ if (chunk.delta) {
+ this.handlers.onToolCallDelta?.(actualIndex, chunk.delta)
}
// Update UIMessage
@@ -676,47 +738,79 @@ export class StreamProcessor {
}
/**
- * Handle a tool result chunk
+ * Handle TOOL_CALL_END event (AG-UI)
+ * This handles both tool completion and tool results
*/
- private handleToolResultChunk(
- chunk: Extract<StreamChunk, { type: 'tool_result' }>,
+ private handleToolCallEndEvent(
+ chunk: Extract<StreamChunk, { type: 'TOOL_CALL_END' }>,
): void {
- const state: ToolResultState = 'complete'
+ const toolCallId = chunk.toolCallId
+ const existingToolCall = this.toolCalls.get(toolCallId)
- // Emit legacy handler
- this.handlers.onToolResultStateChange?.(
- chunk.toolCallId,
- chunk.content,
- state,
- )
+ if (existingToolCall) {
+ // Mark tool call as complete
+ existingToolCall.state = 'input-complete'
+ if (chunk.input) {
+ existingToolCall.parsedArguments = chunk.input
+ }
+ }
- // Update UIMessage if we have a current assistant message
- if (this.currentAssistantMessageId) {
- this.messages = updateToolResultPart(
- this.messages,
- this.currentAssistantMessageId,
- chunk.toolCallId,
- chunk.content,
- state,
+ // If there's a result, this is a tool result
+ if (chunk.result !== undefined) {
+ const state: ToolResultState = 'complete'
+ const resultContent =
+ typeof chunk.result === 'string'
+ ? chunk.result
+ : JSON.stringify(chunk.result)
+
+ // Emit handler
+ this.handlers.onToolResultStateChange?.(toolCallId, resultContent, state)
+
+ // Update UIMessage if we have a current assistant message
+ if (this.currentAssistantMessageId) {
+ this.messages = updateToolResultPart(
+ this.messages,
+ this.currentAssistantMessageId,
+ toolCallId,
+ resultContent,
+ state,
+ )
+ this.emitMessagesChange()
+ }
+ } else if (chunk.input !== undefined) {
+ // This is tool input available (client tool ready for execution)
+ // Emit legacy handler
+ this.handlers.onToolInputAvailable?.(
+ toolCallId,
+ chunk.toolName,
+ chunk.input,
)
- this.emitMessagesChange()
+
+ // Emit new event
+ this.events.onToolCall?.({
+ toolCallId,
+ toolName: chunk.toolName,
+ input: chunk.input,
+ })
}
}
/**
- * Handle a done chunk
+ * Handle RUN_FINISHED event (AG-UI)
*/
- private handleDoneChunk(chunk: Extract<StreamChunk, { type: 'done' }>): void {
+ private handleRunFinishedEvent(
+ chunk: Extract<StreamChunk, { type: 'RUN_FINISHED' }>,
+ ): void {
this.finishReason = chunk.finishReason
this.isDone = true
this.completeAllToolCalls()
}
/**
- * Handle an error chunk
+ * Handle RUN_ERROR event (AG-UI)
*/
- private handleErrorChunk(
- chunk: Extract<StreamChunk, { type: 'error' }>,
+ private handleRunErrorEvent(
+ chunk: Extract<StreamChunk, { type: 'RUN_ERROR' }>,
): void {
// Emit legacy handler
this.handlers.onError?.(chunk.error)
@@ -726,16 +820,16 @@ export class StreamProcessor {
}
/**
- * Handle a thinking chunk
+ * Handle STEP_FINISHED event (AG-UI) - for thinking/reasoning content
*/
- private handleThinkingChunk(
- chunk: Extract<StreamChunk, { type: 'thinking' }>,
+ private handleStepFinishedEvent(
+ chunk: Extract<StreamChunk, { type: 'STEP_FINISHED' }>,
): void {
const previous = this.thinkingContent
let nextThinking = previous
// Prefer delta over content
- if (chunk.delta !== '') {
+ if (chunk.delta && chunk.delta !== '') {
nextThinking = previous + chunk.delta
} else if (chunk.content !== '') {
if (chunk.content.startsWith(previous)) {
@@ -770,69 +864,57 @@ export class StreamProcessor {
}
/**
- * Handle an approval-requested chunk
+ * Handle CUSTOM event (AG-UI) - for approval flows and other custom events
*/
- private handleApprovalRequestedChunk(
- chunk: Extract<StreamChunk, { type: 'approval-requested' }>,
+ private handleCustomEvent(
+ chunk: Extract<StreamChunk, { type: 'CUSTOM' }>,
): void {
- // Emit legacy handler
- this.handlers.onApprovalRequested?.(
- chunk.toolCallId,
- chunk.toolName,
- chunk.input,
- chunk.approval.id,
- )
+ // Handle approval-requested custom event
+ if (chunk.name === 'approval-requested') {
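+ // The CUSTOM payload mirrors the legacy approval-requested chunk shape.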
+ const value = chunk.value as {
+ toolCallId: string
+ toolName: string
+ input: any
+ approval: { id: string }
+ }
- // Update UIMessage with approval metadata
- if (this.currentAssistantMessageId) {
- this.messages = updateToolCallApproval(
- this.messages,
- this.currentAssistantMessageId,
- chunk.toolCallId,
- chunk.approval.id,
+ // Emit legacy handler
+ this.handlers.onApprovalRequested?.(
+ value.toolCallId,
+ value.toolName,
+ value.input,
+ value.approval.id,
)
- this.emitMessagesChange()
- }
- // Emit new event
- this.events.onApprovalRequest?.({
- toolCallId: chunk.toolCallId,
- toolName: chunk.toolName,
- input: chunk.input,
- approvalId: chunk.approval.id,
- })
- }
-
- /**
- * Handle a tool-input-available chunk
- */
- private handleToolInputAvailableChunk(
- chunk: Extract<StreamChunk, { type: 'tool-input-available' }>,
- ): void {
- // Emit legacy handler
- this.handlers.onToolInputAvailable?.(
- chunk.toolCallId,
- chunk.toolName,
- chunk.input,
- )
+ // Update UIMessage with approval metadata
+ if (this.currentAssistantMessageId) {
+ this.messages = updateToolCallApproval(
+ this.messages,
+ this.currentAssistantMessageId,
+ value.toolCallId,
+ value.approval.id,
+ )
+ this.emitMessagesChange()
+ }
- // Emit new event
- this.events.onToolCall?.({
- toolCallId: chunk.toolCallId,
- toolName: chunk.toolName,
- input: chunk.input,
- })
+ // Emit new event
+ this.events.onApprovalRequest?.({
+ toolCallId: value.toolCallId,
+ toolName: value.toolName,
+ input: value.input,
+ approvalId: value.approval.id,
+ })
+ }
}
/**
* Detect if an incoming content chunk represents a NEW text segment
*/
private isNewTextSegment(
- chunk: Extract<StreamChunk, { type: 'content' }>,
+ chunk: Extract<StreamChunk, { type: 'TEXT_MESSAGE_CONTENT' }>,
previous: string,
): boolean {
- // eslint-disable-next-line @typescript-eslint/no-unnecessary-condition
- if (chunk.delta !== undefined && chunk.content !== undefined) {
+ if (chunk.content !== undefined) {
if (chunk.content.length < previous.length) {
return true
}
@@ -1018,6 +1100,420 @@ export class StreamProcessor {
}
}
+ // ============================================
+ // Legacy Event Handlers (Backward Compatibility)
+ // ============================================
+
+ /**
+ * Handle legacy 'content' chunk type
+ */
+ private handleLegacyContentChunk(
+ chunk: Extract<StreamChunk, { type: 'content' }>,
+ ): void {
+ // Convert to TEXT_MESSAGE_CONTENT handling
+ this.completeAllToolCalls()
+
+ const previousSegment = this.currentSegmentText
+
+ const isNewSegment =
+ this.hasToolCallsSinceTextStart &&
+ previousSegment.length > 0 &&
+ this.isNewTextSegmentFromLegacy(chunk, previousSegment)
+
+ if (isNewSegment) {
+ if (previousSegment !== this.lastEmittedText) {
+ this.emitTextUpdate()
+ }
+ this.currentSegmentText = ''
+ this.lastEmittedText = ''
+ this.hasToolCallsSinceTextStart = false
+ }
+
+ const currentText = this.currentSegmentText
+ let nextText = currentText
+
+ // In a new segment (after tool call), prefer content field if it contains more
+ // than just the delta (i.e., it includes accumulated text from before)
+ if (
+ isNewSegment &&
+ chunk.content &&
+ chunk.content !== '' &&
+ chunk.content.length > chunk.delta.length
+ ) {
+ // Use content field for accumulated text in new segments
+ nextText = chunk.content
+ } else if (chunk.delta && chunk.delta !== '') {
+ nextText = currentText + chunk.delta
+ } else if (chunk.content && chunk.content !== '') {
+ if (chunk.content.startsWith(currentText)) {
+ nextText = chunk.content
+ } else if (currentText.startsWith(chunk.content)) {
+ nextText = currentText
+ } else {
+ nextText = currentText + chunk.content
+ }
+ }
+
+ const textDelta = nextText.slice(currentText.length)
+ this.currentSegmentText = nextText
+ this.totalTextContent += textDelta
+
+ // Reset hasToolCallsSinceTextStart after processing content
+ // This prevents subsequent chunks in the same segment from being treated as new segments
+ this.hasToolCallsSinceTextStart = false
+
+ const chunkPortion = chunk.delta || chunk.content
+ const shouldEmit = this.chunkStrategy.shouldEmit(
+ chunkPortion,
+ this.currentSegmentText,
+ )
+ if (shouldEmit && this.currentSegmentText !== this.lastEmittedText) {
+ this.emitTextUpdate()
+ }
+ }
+
+ /**
+ * Check if this is a new text segment from legacy chunk
+ */
+ private isNewTextSegmentFromLegacy(
+ chunk: Extract<StreamChunk, { type: 'content' }>,
+ previousSegment: string,
+ ): boolean {
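+ // Heuristic: treat this as a new segment when the text neither continues the previous segment's start nor overlaps its trailing edge.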
+ if (!chunk.delta && !chunk.content) return false
+ const newContent = chunk.delta || chunk.content || ''
+ return (
+ !newContent.startsWith(previousSegment.slice(0, 10)) &&
+ !previousSegment.endsWith(newContent.slice(-10))
+ )
+ }
+
+ /**
+ * Handle legacy 'done' chunk type
+ */
+ private handleLegacyDoneChunk(
+ chunk: Extract<StreamChunk, { type: 'done' }>,
+ ): void {
+ this.finishReason = chunk.finishReason ?? 'stop'
+ this.isDone = true
+ }
+
+ /**
+ * Handle legacy 'error' chunk type
+ */
+ private handleLegacyErrorChunk(
+ chunk: Extract<StreamChunk, { type: 'error' }>,
+ ): void {
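+ // Legacy errors may be a bare string or an object; normalize both shapes.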
+ const errorMessage =
+ typeof chunk.error === 'string' ? chunk.error : chunk.error.message
+ const errorCode =
+ typeof chunk.error === 'object' ? chunk.error.code : chunk.code
+ this.handlers.onError?.(new Error(errorMessage))
+ this.events.onError?.(new Error(`${errorCode}: ${errorMessage}`))
+ }
+
+ /**
+ * Handle legacy 'tool_call' chunk type
+ */
+ private handleLegacyToolCallChunk(
+ chunk: Extract<StreamChunk, { type: 'tool_call' }>,
+ ): void {
+ this.hasToolCallsSinceTextStart = true
+
+ const toolCall = chunk.toolCall
+ const toolCallId = toolCall.id
+ const existingToolCall = this.toolCalls.get(toolCallId)
+
+ if (!existingToolCall) {
+ // New tool call - use approval-requested state if approval is needed
+ const initialState: ToolCallState = chunk.approval?.needsApproval
+ ? 'approval-requested'
+ : 'awaiting-input'
+
+ const actualIndex = this.toolCallOrder.length
+
+ const newToolCall: InternalToolCallState = {
+ id: toolCallId,
+ name: toolCall.function.name,
+ arguments: toolCall.function.arguments,
+ state: initialState,
+ parsedArguments: undefined,
+ index: actualIndex,
+ }
+
+ this.toolCalls.set(toolCallId, newToolCall)
+ this.toolCallOrder.push(toolCallId)
+
+ // Emit legacy lifecycle event
+ this.handlers.onToolCallStart?.(
+ actualIndex,
+ toolCallId,
+ toolCall.function.name,
+ )
+
+ // Emit legacy delta for initial arguments
+ if (toolCall.function.arguments) {
+ this.handlers.onToolCallDelta?.(
+ actualIndex,
+ toolCall.function.arguments,
+ )
+ }
+
+ // Update UIMessage
+ if (this.currentAssistantMessageId) {
+ this.messages = updateToolCallPart(
+ this.messages,
+ this.currentAssistantMessageId,
+ {
+ id: toolCallId,
+ name: toolCall.function.name,
+ arguments: toolCall.function.arguments,
+ state: initialState,
+ },
+ )
+ if (chunk.approval) {
+ this.messages = updateToolCallApproval(
+ this.messages,
+ this.currentAssistantMessageId,
+ toolCallId,
+ chunk.approval.id,
+ )
+ }
+ this.emitMessagesChange()
+ }
+
+ this.handlers.onToolCallStateChange?.(
+ actualIndex,
+ toolCallId,
+ toolCall.function.name,
+ initialState,
+ toolCall.function.arguments,
+ this.jsonParser.parse(toolCall.function.arguments),
+ )
+
+ this.events.onToolCallStateChange?.(
+ this.currentAssistantMessageId || '',
+ toolCallId,
+ initialState,
+ toolCall.function.arguments,
+ )
+ } else {
+ // Update existing tool call arguments
+ existingToolCall.name = existingToolCall.name || toolCall.function.name
+ existingToolCall.arguments += toolCall.function.arguments
+
+ // Emit delta event for additional arguments
+ if (toolCall.function.arguments) {
+ this.handlers.onToolCallDelta?.(
+ existingToolCall.index,
+ toolCall.function.arguments,
+ )
+ }
+ }
+ }
+
+ /**
+ * Handle legacy 'tool_result' chunk type
+ */
+ private handleLegacyToolResultChunk(
+ chunk: Extract<StreamChunk, { type: 'tool_result' }>,
+ ): void {
+ const toolCallId = chunk.toolCallId
+ const existingToolCall = this.toolCalls.get(toolCallId)
+
+ if (existingToolCall) {
+ existingToolCall.state = 'input-complete'
+ }
+
+ if (this.currentAssistantMessageId) {
+ // Add a tool-result part (separate from the tool-call part)
+ this.messages = updateToolResultPart(
+ this.messages,
+ this.currentAssistantMessageId,
+ toolCallId,
+ chunk.content,
+ 'complete',
+ )
+ this.emitMessagesChange()
+ }
+
+ this.handlers.onToolResultStateChange?.(
+ toolCallId,
+ chunk.content,
+ 'complete',
+ )
+ }
+
+ /**
+ * Handle legacy 'thinking' chunk type
+ */
+ private handleLegacyThinkingChunk(
+ chunk: Extract<StreamChunk, { type: 'thinking' }>,
+ ): void {
+ const previousThinking = this.thinkingContent
+
+ if (chunk.delta && chunk.delta !== '') {
+ this.thinkingContent = previousThinking + chunk.delta
+ } else if (chunk.content) {
+ if (chunk.content.startsWith(previousThinking)) {
+ this.thinkingContent = chunk.content
+ } else if (previousThinking.startsWith(chunk.content)) {
+ // Current thinking already includes this content
+ } else {
+ this.thinkingContent = previousThinking + chunk.content
+ }
+ }
+
+ this.handlers.onThinkingUpdate?.(this.thinkingContent)
+
+ if (this.currentAssistantMessageId) {
+ this.messages = updateThinkingPart(
+ this.messages,
+ this.currentAssistantMessageId,
+ this.thinkingContent,
+ )
+ this.emitMessagesChange()
+
+ // Emit new granular event
+ this.events.onThinkingUpdate?.(
+ this.currentAssistantMessageId,
+ this.thinkingContent,
+ )
+ }
+ }
+
+ /**
+ * Handle legacy 'approval-requested' chunk type
+ */
+ private handleLegacyApprovalRequestedChunk(
+ chunk: Extract<StreamChunk, { type: 'approval-requested' }>,
+ ): void {
+ const toolCallId = chunk.toolCallId
+ const existingToolCall = this.toolCalls.get(toolCallId)
+
+ if (existingToolCall) {
+ existingToolCall.state = 'approval-requested'
+ existingToolCall.parsedArguments = chunk.input
+
+ if (this.currentAssistantMessageId) {
+ this.messages = updateToolCallPart(
+ this.messages,
+ this.currentAssistantMessageId,
+ {
+ id: toolCallId,
+ name: existingToolCall.name,
+ arguments: JSON.stringify(chunk.input),
+ state: 'approval-requested',
+ },
+ )
+ if (chunk.approval) {
+ this.messages = updateToolCallApproval(
+ this.messages,
+ this.currentAssistantMessageId,
+ toolCallId,
+ chunk.approval.id,
+ )
+ }
+ this.emitMessagesChange()
+ }
+
+ this.handlers.onToolCallStateChange?.(
+ existingToolCall.index,
+ toolCallId,
+ chunk.toolName,
+ 'approval-requested',
+ JSON.stringify(chunk.input),
+ chunk.input,
+ )
+
+ this.events.onToolCallStateChange?.(
+ this.currentAssistantMessageId || '',
+ toolCallId,
+ 'approval-requested',
+ JSON.stringify(chunk.input),
+ )
+ }
+
+ // Always call onApprovalRequested and onApprovalRequest regardless of existingToolCall
+ this.handlers.onApprovalRequested?.(
+ toolCallId,
+ chunk.toolName,
+ chunk.input,
+ chunk.approval?.id || '',
+ )
+
+ this.events.onApprovalRequest?.({
+ toolCallId,
+ toolName: chunk.toolName,
+ input: chunk.input,
+ approvalId: chunk.approval?.id || '',
+ })
+ }
+
+ /**
+ * Handle legacy 'tool-input-available' chunk type
+ */
+ private handleLegacyToolInputAvailableChunk(
+ chunk: Extract<StreamChunk, { type: 'tool-input-available' }>,
+ ): void {
+ const toolCallId = chunk.toolCallId
+ const existingToolCall = this.toolCalls.get(toolCallId)
+
+ if (existingToolCall) {
+ existingToolCall.state = 'input-complete'
+ existingToolCall.parsedArguments = chunk.input
+
+ if (this.currentAssistantMessageId) {
+ this.messages = updateToolCallPart(
+ this.messages,
+ this.currentAssistantMessageId,
+ {
+ id: toolCallId,
+ name: existingToolCall.name,
+ arguments: JSON.stringify(chunk.input),
+ state: 'input-complete',
+ },
+ )
+ this.emitMessagesChange()
+ }
+
+ this.handlers.onToolCallStateChange?.(
+ existingToolCall.index,
+ toolCallId,
+ chunk.toolName,
+ 'input-complete',
+ JSON.stringify(chunk.input),
+ chunk.input,
+ )
+
+ this.events.onToolCallStateChange?.(
+ this.currentAssistantMessageId || '',
+ toolCallId,
+ 'input-complete',
+ JSON.stringify(chunk.input),
+ )
+
+ // Also invoke the onToolCall handler
+ this.events.onToolCall?.({
+ toolCallId,
+ toolName: chunk.toolName,
+ input: chunk.input,
+ })
+ }
+
+ // Always call onToolInputAvailable and onToolCall regardless of existingToolCall
+ this.handlers.onToolInputAvailable?.(
+ toolCallId,
+ chunk.toolName,
+ chunk.input,
+ )
+
+ this.events.onToolCall?.({
+ toolCallId,
+ toolName: chunk.toolName,
+ input: chunk.input,
+ })
+ }
+
/**
* Start recording chunks
*/
diff --git a/packages/typescript/ai/src/tools/tool-calls.ts b/packages/typescript/ai/src/tools/tool-calls.ts
index 48287245..e61348b1 100644
--- a/packages/typescript/ai/src/tools/tool-calls.ts
+++ b/packages/typescript/ai/src/tools/tool-calls.ts
@@ -1,10 +1,12 @@
import type {
- DoneStreamChunk,
ModelMessage,
+ RunFinishedEvent,
SchemaInput,
Tool,
ToolCall,
- ToolResultStreamChunk,
+ ToolCallArgsEvent,
+ ToolCallEndEvent,
+ ToolCallStartEvent,
} from '../types'
import type { z } from 'zod'
@@ -57,8 +59,49 @@ export class ToolCallManager {
}
/**
- * Add a tool call chunk to the accumulator
+ * Add a TOOL_CALL_START event to begin tracking a tool call
+ */
+ addToolCallStartEvent(chunk: ToolCallStartEvent): void {
+ const index = chunk.index ?? this.toolCallsMap.size
+ this.toolCallsMap.set(index, {
+ id: chunk.toolCallId,
+ type: 'function',
+ function: {
+ name: chunk.toolName,
+ arguments: '',
+ },
+ })
+ }
+
+ /**
+ * Add a TOOL_CALL_ARGS event to accumulate arguments
+ */
+ addToolCallArgsEvent(chunk: ToolCallArgsEvent): void {
+ // Find the tool call by ID
+ for (const [, toolCall] of this.toolCallsMap.entries()) {
+ if (toolCall.id === chunk.toolCallId) {
+ toolCall.function.arguments += chunk.delta
+ break
+ }
+ }
+ }
+
+ /**
+ * Complete a tool call with its final input
+ */
+ completeToolCall(toolCallId: string, input?: any): void {
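+ // Replace the streamed argument text with the canonical parsed input, re-serialized.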
+ for (const [, toolCall] of this.toolCallsMap.entries()) {
+ if (toolCall.id === toolCallId && input !== undefined) {
+ toolCall.function.arguments = JSON.stringify(input)
+ break
+ }
+ }
+ }
+
+ /**
+ * Add a tool call chunk to the accumulator (legacy format)
* Handles streaming tool calls by accumulating arguments
+ * @deprecated Use addToolCallStartEvent and addToolCallArgsEvent instead
*/
addToolCallChunk(chunk: {
toolCall: {
@@ -116,11 +159,11 @@ export class ToolCallManager {
/**
* Execute all tool calls and return tool result messages
- * Also yields tool_result chunks for streaming
+ * Also yields TOOL_CALL_END events for streaming
*/
async *executeTools(
- doneChunk: DoneStreamChunk,
- ): AsyncGenerator<ToolResultStreamChunk, Array<ModelMessage>, void> {
+ doneChunk: RunFinishedEvent,
+ ): AsyncGenerator<ToolCallEndEvent, Array<ModelMessage>, void> {
const toolCallsArray = this.getToolCalls()
const toolResults: Array<ModelMessage> = []
@@ -181,14 +224,14 @@ export class ToolCallManager {
toolResultContent = `Tool ${toolCall.function.name} does not have an execute function`
}
- // Emit tool_result chunk so callers can track tool execution
+ // Emit TOOL_CALL_END event with result
yield {
- type: 'tool_result',
- id: doneChunk.id,
- model: doneChunk.model,
+ type: 'TOOL_CALL_END',
timestamp: Date.now(),
+ model: doneChunk.model,
toolCallId: toolCall.id,
- content: toolResultContent,
+ toolName: toolCall.function.name,
+ result: toolResultContent,
}
// Add tool result message
diff --git a/packages/typescript/ai/src/types.ts b/packages/typescript/ai/src/types.ts
index 748526c2..e1a078f5 100644
--- a/packages/typescript/ai/src/types.ts
+++ b/packages/typescript/ai/src/types.ts
@@ -581,51 +581,71 @@ export interface ChatOptions<
abortController?: AbortController
}
-export type StreamChunkType =
+// ============================================================================
+// AG-UI Protocol Event Types
+// ============================================================================
+
+/**
+ * AG-UI Protocol event types.
+ * Based on the AG-UI specification for agent-user interaction.
+ * @see https://docs.ag-ui.com/concepts/events
+ *
+ * Includes legacy type aliases for backward compatibility during migration.
+ */
+export type EventType =
+ // AG-UI Standard Events
+ | 'RUN_STARTED'
+ | 'RUN_FINISHED'
+ | 'RUN_ERROR'
+ | 'TEXT_MESSAGE_START'
+ | 'TEXT_MESSAGE_CONTENT'
+ | 'TEXT_MESSAGE_END'
+ | 'TOOL_CALL_START'
+ | 'TOOL_CALL_ARGS'
+ | 'TOOL_CALL_END'
+ | 'STEP_STARTED'
+ | 'STEP_FINISHED'
+ | 'STATE_SNAPSHOT'
+ | 'STATE_DELTA'
+ | 'CUSTOM'
+ // Legacy types (deprecated, for backward compatibility)
| 'content'
- | 'tool_call'
- | 'tool_result'
| 'done'
| 'error'
+ | 'tool_call'
+ | 'tool_result'
+ | 'thinking'
| 'approval-requested'
| 'tool-input-available'
- | 'thinking'
-export interface BaseStreamChunk {
- type: StreamChunkType
- id: string
- model: string
+/**
+ * Base structure for all AG-UI events.
+ * Extends AG-UI spec with TanStack AI additions (model field).
+ */
+export interface BaseEvent {
+ type: EventType
timestamp: number
+ /** TanStack AI addition: Model identifier for multi-model support */
+ model?: string
+ /** Original provider event for debugging/advanced use cases */
+ rawEvent?: unknown
}
-export interface ContentStreamChunk extends BaseStreamChunk {
- type: 'content'
- delta: string // The incremental content token
- content: string // Full accumulated content so far
- role?: 'assistant'
-}
-
-export interface ToolCallStreamChunk extends BaseStreamChunk {
- type: 'tool_call'
- toolCall: {
- id: string
- type: 'function'
- function: {
- name: string
- arguments: string // Incremental JSON arguments
- }
- }
- index: number
-}
-
-export interface ToolResultStreamChunk extends BaseStreamChunk {
- type: 'tool_result'
- toolCallId: string
- content: string
+/**
+ * Emitted when a run starts.
+ */
+export interface RunStartedEvent extends BaseEvent {
+ type: 'RUN_STARTED'
+ runId: string
+ threadId?: string
}
-export interface DoneStreamChunk extends BaseStreamChunk {
- type: 'done'
+/**
+ * Emitted when a run completes successfully.
+ */
+export interface RunFinishedEvent extends BaseEvent {
+ type: 'RUN_FINISHED'
+ runId: string
finishReason: 'stop' | 'length' | 'content_filter' | 'tool_calls' | null
usage?: {
promptTokens: number
@@ -634,50 +654,292 @@ export interface DoneStreamChunk extends BaseStreamChunk {
}
}
-export interface ErrorStreamChunk extends BaseStreamChunk {
- type: 'error'
+/**
+ * Emitted when an error occurs during a run.
+ */
+export interface RunErrorEvent extends BaseEvent {
+ type: 'RUN_ERROR'
+ runId?: string
error: {
message: string
code?: string
}
}
-export interface ApprovalRequestedStreamChunk extends BaseStreamChunk {
- type: 'approval-requested'
+/**
+ * Emitted when a text message starts.
+ */
+export interface TextMessageStartEvent extends BaseEvent {
+ type: 'TEXT_MESSAGE_START'
+ messageId: string
+ role: 'assistant'
+}
+
+/**
+ * Emitted when text content is generated (streaming tokens).
+ */
+export interface TextMessageContentEvent extends BaseEvent {
+ type: 'TEXT_MESSAGE_CONTENT'
+ messageId: string
+ delta: string
+ /** TanStack AI addition: Full accumulated content so far */
+ content?: string
+}
+
+/**
+ * Emitted when a text message completes.
+ */
+export interface TextMessageEndEvent extends BaseEvent {
+ type: 'TEXT_MESSAGE_END'
+ messageId: string
+}
+
+/**
+ * Emitted when a tool call starts.
+ */
+export interface ToolCallStartEvent extends BaseEvent {
+ type: 'TOOL_CALL_START'
toolCallId: string
toolName: string
- input: any
- approval: {
+ /** Index for parallel tool calls */
+ index?: number
+ /** Approval metadata if tool requires user approval */
+ approval?: {
id: string
needsApproval: true
}
}
-export interface ToolInputAvailableStreamChunk extends BaseStreamChunk {
- type: 'tool-input-available'
+/**
+ * Emitted when tool call arguments are streaming.
+ */
+export interface ToolCallArgsEvent extends BaseEvent {
+ type: 'TOOL_CALL_ARGS'
+ toolCallId: string
+ /** Incremental JSON arguments delta */
+ delta: string
+ /** Full accumulated arguments so far */
+ args?: string
+}
+
+/**
+ * Emitted when a tool call completes (with optional result).
+ */
+export interface ToolCallEndEvent extends BaseEvent {
+ type: 'TOOL_CALL_END'
toolCallId: string
toolName: string
- input: any
+ /** Final parsed input arguments */
+ input?: unknown
+ /** Tool execution result (present when tool has executed) */
+ result?: unknown
}
-export interface ThinkingStreamChunk extends BaseStreamChunk {
- type: 'thinking'
- delta?: string // The incremental thinking token
- content: string // Full accumulated thinking content so far
+/**
+ * Emitted when a reasoning/thinking step starts.
+ */
+export interface StepStartedEvent extends BaseEvent {
+ type: 'STEP_STARTED'
+ stepId: string
+ stepType: 'thinking' | 'reasoning' | 'planning'
}
/**
- * Chunk returned by the sdk during streaming chat completions.
+ * Emitted when a reasoning/thinking step completes or streams content.
+ */
+export interface StepFinishedEvent extends BaseEvent {
+ type: 'STEP_FINISHED'
+ stepId: string
+ /** Incremental thinking token */
+ delta?: string
+ /** Full accumulated thinking content */
+ content: string
+}
+
+/**
+ * Emitted for full state synchronization.
+ */
+export interface StateSnapshotEvent extends BaseEvent {
+ type: 'STATE_SNAPSHOT'
+ state: Record<string, unknown>
+}
+
+/**
+ * Emitted for incremental state updates.
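+ * Each entry is a JSON Patch-style operation (add/remove/replace) applied at a path.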
+ */
+export interface StateDeltaEvent extends BaseEvent {
+ type: 'STATE_DELTA'
+ delta: Array<{
+ op: 'add' | 'remove' | 'replace'
+ path: string
+ value?: unknown
+ }>
+}
+
+/**
+ * Custom event for extensibility.
+ * Used for features not covered by standard AG-UI events (e.g., approval flows).
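+ * Example: name 'approval-requested' with value { toolCallId, toolName, input, approval }.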
+ */
+export interface CustomEvent extends BaseEvent {
+ type: 'CUSTOM'
+ name: string
+ value: unknown
+}
+
+/**
+ * Union type for all AG-UI events.
+ * This is the primary type for streaming chat completions.
+ * Includes legacy types for backward compatibility.
*/
export type StreamChunk =
+ // AG-UI Standard Events
+ | RunStartedEvent
+ | RunFinishedEvent
+ | RunErrorEvent
+ | TextMessageStartEvent
+ | TextMessageContentEvent
+ | TextMessageEndEvent
+ | ToolCallStartEvent
+ | ToolCallArgsEvent
+ | ToolCallEndEvent
+ | StepStartedEvent
+ | StepFinishedEvent
+ | StateSnapshotEvent
+ | StateDeltaEvent
+ | CustomEvent
+ // Legacy types (deprecated)
| ContentStreamChunk
- | ToolCallStreamChunk
- | ToolResultStreamChunk
| DoneStreamChunk
| ErrorStreamChunk
+ | ToolCallStreamChunk
+ | ToolResultStreamChunk
+ | ThinkingStreamChunk
| ApprovalRequestedStreamChunk
| ToolInputAvailableStreamChunk
- | ThinkingStreamChunk
+
+// Legacy type aliases for transition (can be removed in future version)
+export type StreamChunkType = EventType
+
+// ============================================================================
+// Legacy Chunk Type Aliases (Deprecated - for backward compatibility)
+// ============================================================================
+// These types provide backward compatibility during the transition to AG-UI.
+// They map old chunk type names to the new AG-UI event types.
+// These will be removed in a future major version.
+
+/**
+ * @deprecated Use TextMessageContentEvent instead
+ */
+export interface ContentStreamChunk {
+ type: 'content'
+ id: string
+ model: string
+ timestamp: number
+ /** Incremental text delta */
+ delta: string
+ /** Full accumulated content so far */
+ content: string
+ /** Role of the message */
+ role?: 'assistant'
+}
+
+/**
+ * @deprecated Use RunFinishedEvent instead
+ */
+export interface DoneStreamChunk {
+ type: 'done'
+ id: string
+ model: string
+ timestamp: number
+ finishReason?: 'stop' | 'length' | 'content_filter' | 'tool_calls' | null
+ usage?: {
+ promptTokens: number
+ completionTokens: number
+ totalTokens: number
+ }
+}
+
+/**
+ * @deprecated Use RunErrorEvent instead
+ */
+export interface ErrorStreamChunk {
+ type: 'error'
+ id: string
+ model: string
+ timestamp: number
+ error: string | { message: string; code?: string }
+ code?: string
+}
+
+/**
+ * @deprecated Use ToolCallStartEvent and ToolCallArgsEvent instead
+ */
+export interface ToolCallStreamChunk {
+ type: 'tool_call'
+ id: string
+ model: string
+ timestamp: number
+ toolCall: ToolCall
+ index: number
+ approval?: {
+ id: string
+ needsApproval: true
+ }
+}
+
+/**
+ * @deprecated Use ToolCallEndEvent instead
+ */
+export interface ToolResultStreamChunk {
+ type: 'tool_result'
+ id: string
+ model: string
+ timestamp: number
+ toolCallId: string
+ content: string
+}
+
+/**
+ * @deprecated Use StepStartedEvent/StepFinishedEvent instead
+ */
+export interface ThinkingStreamChunk {
+ type: 'thinking'
+ id: string
+ model: string
+ timestamp: number
+ delta?: string
+ content: string
+}
+
+/**
+ * @deprecated Use CustomEvent with name='approval-requested' instead
+ */
+export interface ApprovalRequestedStreamChunk {
+ type: 'approval-requested'
+ id: string
+ model: string
+ timestamp: number
+ toolCallId: string
+ toolName: string
+ input: Record<string, unknown>
+ approval?: {
+ id: string
+ needsApproval: true
+ }
+}
+
+/**
+ * @deprecated Use CustomEvent with name='tool-input-available' instead
+ */
+export interface ToolInputAvailableStreamChunk {
+ type: 'tool-input-available'
+ id: string
+ model: string
+ timestamp: number
+ toolCallId: string
+ toolName: string
+ input: Record<string, unknown>
+}
// Simple streaming format for basic chat completions
// Converted to StreamChunk format by convertChatCompletionStream()
diff --git a/packages/typescript/ai/src/utilities/stream-to-response.ts b/packages/typescript/ai/src/utilities/stream-to-response.ts
index 19069204..186b9cbd 100644
--- a/packages/typescript/ai/src/utilities/stream-to-response.ts
+++ b/packages/typescript/ai/src/utilities/stream-to-response.ts
@@ -50,11 +50,12 @@ export function toServerSentEventsStream(
return
}
- // Send error chunk
+ // Send error event (AG-UI RUN_ERROR)
controller.enqueue(
encoder.encode(
`data: ${JSON.stringify({
- type: 'error',
+ type: 'RUN_ERROR',
+ timestamp: Date.now(),
error: {
message: error.message || 'Unknown error occurred',
code: error.code,
diff --git a/packages/typescript/ai/tests/ai-chat.test.ts b/packages/typescript/ai/tests/ai-chat.test.ts
index 2ec8a650..75e0ed1f 100644
--- a/packages/typescript/ai/tests/ai-chat.test.ts
+++ b/packages/typescript/ai/tests/ai-chat.test.ts
@@ -455,7 +455,9 @@ describe('chat() - Comprehensive Logic Path Coverage', () => {
expect(tool.execute).toHaveBeenCalledWith({ location: 'Paris' })
expect(adapter.chatStreamCallCount).toBeGreaterThanOrEqual(2)
- const toolResultChunks = chunks.filter((c) => c.type === 'tool_result')
+ const toolResultChunks = chunks.filter(
+ (c) => c.type === 'TOOL_CALL_END' && (c as any).result !== undefined,
+ )
expect(toolResultChunks).toHaveLength(1)
// Check events
@@ -560,7 +562,9 @@ describe('chat() - Comprehensive Logic Path Coverage', () => {
// Tool should be executed with complete arguments
expect(tool.execute).toHaveBeenCalledWith({ a: 10, b: 20 })
- const toolResultChunks = chunks.filter((c) => c.type === 'tool_result')
+ const toolResultChunks = chunks.filter(
+ (c) => c.type === 'TOOL_CALL_END' && (c as any).result !== undefined,
+ )
expect(toolResultChunks.length).toBeGreaterThan(0)
})
@@ -652,7 +656,9 @@ describe('chat() - Comprehensive Logic Path Coverage', () => {
expect(tool1.execute).toHaveBeenCalled()
expect(tool2.execute).toHaveBeenCalled()
- const toolResultChunks = chunks.filter((c) => c.type === 'tool_result')
+ const toolResultChunks = chunks.filter(
+ (c) => c.type === 'TOOL_CALL_END' && (c as any).result !== undefined,
+ )
expect(toolResultChunks).toHaveLength(2)
// Check iteration event
@@ -950,12 +956,13 @@ describe('chat() - Comprehensive Logic Path Coverage', () => {
}),
)
- const toolResultChunks = chunks.filter((c) => c.type === 'tool_result')
+ const toolResultChunks = chunks.filter(
+ (c) => c.type === 'TOOL_CALL_END' && (c as any).result !== undefined,
+ )
expect(toolResultChunks).toHaveLength(1)
const resultChunk = toolResultChunks[0] as any
- const result = JSON.parse(resultChunk.content)
- expect(result.result).toBe('success')
+ expect(resultChunk.result.result).toBe('success')
// Check tool:call-completed event
const completedEvents = capturedEvents.filter(
@@ -1104,12 +1111,13 @@ describe('chat() - Comprehensive Logic Path Coverage', () => {
}),
)
- const toolResultChunks = chunks.filter((c) => c.type === 'tool_result')
+ const toolResultChunks = chunks.filter(
+ (c) => c.type === 'TOOL_CALL_END' && (c as any).result !== undefined,
+ )
expect(toolResultChunks).toHaveLength(1)
const resultChunk = toolResultChunks[0] as any
- const result = JSON.parse(resultChunk.content)
- expect(result.error).toBe('Tool execution failed')
+ expect(resultChunk.result.error).toBe('Tool execution failed')
})
it('should handle unknown tool calls', async () => {
@@ -1158,12 +1166,13 @@ describe('chat() - Comprehensive Logic Path Coverage', () => {
)
// Should still produce a tool_result with error
- const toolResultChunks = chunks.filter((c) => c.type === 'tool_result')
+ const toolResultChunks = chunks.filter(
+ (c) => c.type === 'TOOL_CALL_END' && (c as any).result !== undefined,
+ )
expect(toolResultChunks.length).toBeGreaterThan(0)
const resultChunk = toolResultChunks[0] as any
- const result = JSON.parse(resultChunk.content)
- expect(result.error).toContain('Unknown tool')
+ expect(resultChunk.result.error).toContain('Unknown tool')
})
})
@@ -1219,13 +1228,13 @@ describe('chat() - Comprehensive Logic Path Coverage', () => {
)
const approvalChunks = chunks.filter(
- (c) => c.type === 'approval-requested',
+ (c) => c.type === 'CUSTOM' && (c as any).name === 'approval-requested',
)
expect(approvalChunks).toHaveLength(1)
const approvalChunk = approvalChunks[0] as any
- expect(approvalChunk.toolName).toBe('delete_file')
- expect(approvalChunk.approval.needsApproval).toBe(true)
+ expect(approvalChunk.value.toolName).toBe('delete_file')
+ expect(approvalChunk.value.approval.needsApproval).toBe(true)
// Tool should NOT be executed yet
expect(tool.execute).not.toHaveBeenCalled()
@@ -1285,13 +1294,14 @@ describe('chat() - Comprehensive Logic Path Coverage', () => {
)
const inputChunks = chunks.filter(
- (c) => c.type === 'tool-input-available',
+ (c) =>
+ c.type === 'CUSTOM' && (c as any).name === 'tool-input-available',
)
expect(inputChunks).toHaveLength(1)
const inputChunk = inputChunks[0] as any
- expect(inputChunk.toolName).toBe('client_tool')
- expect(inputChunk.input).toEqual({ input: 'test' })
+ expect(inputChunk.value.toolName).toBe('client_tool')
+ expect(inputChunk.value.input).toEqual({ input: 'test' })
// Should emit tool-input-available event
expect(
@@ -1387,10 +1397,11 @@ describe('chat() - Comprehensive Logic Path Coverage', () => {
// Approval and client tools should request intervention
const approvalChunks = chunks.filter(
- (c) => c.type === 'approval-requested',
+ (c) => c.type === 'CUSTOM' && (c as any).name === 'approval-requested',
)
const inputChunks = chunks.filter(
- (c) => c.type === 'tool-input-available',
+ (c) =>
+ c.type === 'CUSTOM' && (c as any).name === 'tool-input-available',
)
expect(approvalChunks.length + inputChunks.length).toBeGreaterThan(0)
@@ -1488,7 +1499,7 @@ describe('chat() - Comprehensive Logic Path Coverage', () => {
})
const chunks = await collectChunks(stream)
- expect(chunks[0]?.type).toBe('tool_result')
+ expect(chunks[0]?.type).toBe('TOOL_CALL_END')
expect(toolExecute).toHaveBeenCalledWith({ path: '/tmp/test.txt' })
expect(adapter.chatStreamCallCount).toBe(1)
})
@@ -2408,26 +2419,25 @@ describe('chat() - Comprehensive Logic Path Coverage', () => {
async *chatStream(options: ChatOptions): AsyncIterable<StreamChunk> {
this.trackStreamCall(options)
yield {
- type: 'content',
- id: 'test-id',
+ type: 'TEXT_MESSAGE_CONTENT',
model: 'test-model',
+ messageId: 'msg-1',
timestamp: Date.now(),
delta: 'Using tool',
content: 'Using tool',
- role: 'assistant',
}
- // Adapter sends tool_result chunk directly (from previous execution)
+ // Adapter sends TOOL_CALL_END with result (from previous execution)
yield {
- type: 'tool_result',
- id: 'test-id',
+ type: 'TOOL_CALL_END',
model: 'test-model',
timestamp: Date.now(),
toolCallId: 'call-previous',
- content: JSON.stringify({ result: 'previous result' }),
+ toolName: 'previousTool',
+ result: { result: 'previous result' },
}
yield {
- type: 'done',
- id: 'test-id',
+ type: 'RUN_FINISHED',
+ runId: 'test-run',
model: 'test-model',
timestamp: Date.now(),
finishReason: 'stop',
@@ -2451,7 +2461,7 @@ describe('chat() - Comprehensive Logic Path Coverage', () => {
)
expect(toolResultEvents.length).toBeGreaterThan(0)
expect(toolResultEvents[0]?.data.toolCallId).toBe('call-previous')
- expect(toolResultEvents[0]?.data.result).toBe(
+ expect(toolResultEvents[0]?.data.result).toEqual(
JSON.stringify({ result: 'previous result' }),
)
})
@@ -2554,7 +2564,9 @@ describe('chat() - Comprehensive Logic Path Coverage', () => {
})
const chunks1 = await collectChunks(stream1)
- const approvalChunk = chunks1.find((c) => c.type === 'approval-requested')
+ const approvalChunk = chunks1.find(
+ (c) => c.type === 'CUSTOM' && (c as any).name === 'approval-requested',
+ )
expect(approvalChunk).toBeDefined()
// Second call - with approval response in message parts
@@ -2677,7 +2689,10 @@ describe('chat() - Comprehensive Logic Path Coverage', () => {
})
const chunks1 = await collectChunks(stream1)
- const inputChunk = chunks1.find((c) => c.type === 'tool-input-available')
+ const inputChunk = chunks1.find(
+ (c) =>
+ c.type === 'CUSTOM' && (c as any).name === 'tool-input-available',
+ )
expect(inputChunk).toBeDefined()
// Second call - with client tool output in message parts
@@ -2976,7 +2991,9 @@ describe('chat() - Comprehensive Logic Path Coverage', () => {
const chunks = await collectChunks(stream)
const toolCallChunks = chunks.filter((c) => c.type === 'tool_call')
- const toolResultChunks = chunks.filter((c) => c.type === 'tool_result')
+ const toolResultChunks = chunks.filter(
+ (c) => c.type === 'TOOL_CALL_END' && (c as any).result !== undefined,
+ )
// We should have received tool_call chunks
expect(toolCallChunks.length).toBeGreaterThan(0)
diff --git a/packages/typescript/ai/tests/stream-to-response.test.ts b/packages/typescript/ai/tests/stream-to-response.test.ts
index 38bc3853..713278df 100644
--- a/packages/typescript/ai/tests/stream-to-response.test.ts
+++ b/packages/typescript/ai/tests/stream-to-response.test.ts
@@ -227,7 +227,7 @@ describe('toServerSentEventsStream', () => {
const sseStream = toServerSentEventsStream(errorStream())
const output = await readStream(sseStream)
- expect(output).toContain('"type":"error"')
+ expect(output).toContain('"type":"RUN_ERROR"')
expect(output).toContain('"message":"Stream error"')
})
diff --git a/packages/typescript/ai/tests/tool-call-manager.test.ts b/packages/typescript/ai/tests/tool-call-manager.test.ts
index 9d74205c..aa4a81d7 100644
--- a/packages/typescript/ai/tests/tool-call-manager.test.ts
+++ b/packages/typescript/ai/tests/tool-call-manager.test.ts
@@ -1,12 +1,12 @@
import { describe, expect, it, vi } from 'vitest'
import { z } from 'zod'
import { ToolCallManager } from '../src/tools/tool-calls'
-import type { DoneStreamChunk, Tool } from '../src/types'
+import type { RunFinishedEvent, Tool } from '../src/types'
describe('ToolCallManager', () => {
- const mockDoneChunk: DoneStreamChunk = {
- type: 'done',
- id: 'test-id',
+ const mockDoneChunk: RunFinishedEvent = {
+ type: 'RUN_FINISHED',
+ runId: 'test-run',
model: 'gpt-4',
timestamp: Date.now(),
finishReason: 'tool_calls',
@@ -94,7 +94,7 @@ describe('ToolCallManager', () => {
expect(toolCalls[0]?.id).toBe('call_123')
})
- it('should execute tools and emit tool_result chunks', async () => {
+ it('should execute tools and emit TOOL_CALL_END events', async () => {
const manager = new ToolCallManager([mockWeatherTool])
manager.addToolCallChunk({
@@ -109,11 +109,11 @@ describe('ToolCallManager', () => {
const { chunks: emittedChunks, result: finalResult } =
await collectGeneratorOutput(manager.executeTools(mockDoneChunk))
- // Should emit one tool_result chunk
+ // Should emit one TOOL_CALL_END event
expect(emittedChunks).toHaveLength(1)
- expect(emittedChunks[0]?.type).toBe('tool_result')
+ expect(emittedChunks[0]?.type).toBe('TOOL_CALL_END')
expect(emittedChunks[0]?.toolCallId).toBe('call_123')
- expect(emittedChunks[0]?.content).toContain('temp')
+ expect(emittedChunks[0]?.result).toContain('temp')
// Should return one tool result message
expect(finalResult).toHaveLength(1)
@@ -150,9 +150,9 @@ describe('ToolCallManager', () => {
manager.executeTools(mockDoneChunk),
)
- // Should still emit chunk with error message
+ // Should still emit event with error message
expect(chunks).toHaveLength(1)
- expect(chunks[0]?.content).toContain('Error executing tool: Tool failed')
+ expect(chunks[0]?.result).toContain('Error executing tool: Tool failed')
// Should still return tool result message
expect(toolResults).toHaveLength(1)
@@ -182,7 +182,7 @@ describe('ToolCallManager', () => {
manager.executeTools(mockDoneChunk),
)
- expect(chunks[0]?.content).toContain('does not have an execute function')
+ expect(chunks[0]?.result).toContain('does not have an execute function')
expect(toolResults[0]?.content).toContain(
'does not have an execute function',
)
@@ -248,7 +248,7 @@ describe('ToolCallManager', () => {
manager.executeTools(mockDoneChunk),
)
- // Should emit two tool_result chunks
+ // Should emit two TOOL_CALL_END events
expect(chunks).toHaveLength(2)
expect(chunks[0]?.toolCallId).toBe('call_weather')
expect(chunks[1]?.toolCallId).toBe('call_calc')
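
These tests drain `executeTools`, which behaves as an async generator: it yields a `TOOL_CALL_END` event per executed tool and returns the tool-result messages. A minimal sketch of a `collectGeneratorOutput` helper compatible with the destructuring used above (the real helper is test scaffolding; this is an illustrative reconstruction):

```typescript
// Illustrative reconstruction of the test helper: drains an async
// generator, collecting every yielded event and capturing the final
// return value separately.
async function collectGeneratorOutput<TChunk, TResult>(
  gen: AsyncGenerator<TChunk, TResult>,
): Promise<{ chunks: Array<TChunk>; result: TResult }> {
  const chunks: Array<TChunk> = []
  while (true) {
    const next = await gen.next()
    if (next.done) return { chunks, result: next.value }
    chunks.push(next.value)
  }
}
```
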
diff --git a/packages/typescript/smoke-tests/adapters/src/harness.ts b/packages/typescript/smoke-tests/adapters/src/harness.ts
index 25d01ee6..5e96825c 100644
--- a/packages/typescript/smoke-tests/adapters/src/harness.ts
+++ b/packages/typescript/smoke-tests/adapters/src/harness.ts
@@ -168,78 +168,115 @@ export async function captureStream(opts: {
model: chunk.model,
}
- if (chunk.type === 'content') {
+ if (chunk.type === 'TEXT_MESSAGE_CONTENT') {
chunkData.delta = chunk.delta
chunkData.content = chunk.content
- chunkData.role = chunk.role
- const delta = chunk.delta || chunk.content || ''
+ const delta = chunk.delta || ''
fullResponse += delta
- if (chunk.role === 'assistant') {
- if (!assistantDraft) {
- assistantDraft = {
- role: 'assistant',
- content: chunk.content || '',
- toolCalls: [],
- }
- } else {
- assistantDraft.content = (assistantDraft.content || '') + delta
+ if (!assistantDraft) {
+ assistantDraft = {
+ role: 'assistant',
+ content: delta,
+ toolCalls: [],
}
+ } else {
+ assistantDraft.content = (assistantDraft.content || '') + delta
}
- } else if (chunk.type === 'tool_call') {
- const id = chunk.toolCall.id
+ } else if (chunk.type === 'TOOL_CALL_START') {
+ const id = chunk.toolCallId
const existing = toolCallMap.get(id) || {
id,
- name: chunk.toolCall.function.name,
+ name: chunk.toolName,
arguments: '',
}
- existing.arguments += chunk.toolCall.function.arguments || ''
toolCallMap.set(id, existing)
- chunkData.toolCall = chunk.toolCall
+ chunkData.toolCallId = chunk.toolCallId
+ chunkData.toolName = chunk.toolName
if (!assistantDraft) {
assistantDraft = { role: 'assistant', content: null, toolCalls: [] }
}
- const existingToolCall = assistantDraft.toolCalls?.find(
- (tc: any) => tc.id === id,
- )
- if (existingToolCall) {
- existingToolCall.function.arguments = existing.arguments
- } else {
- assistantDraft.toolCalls?.push({
- ...chunk.toolCall,
- function: {
- ...chunk.toolCall.function,
- arguments: existing.arguments,
- },
- })
+ assistantDraft.toolCalls?.push({
+ id,
+ type: 'function',
+ function: {
+ name: chunk.toolName,
+ arguments: '',
+ },
+ })
+ } else if (chunk.type === 'TOOL_CALL_ARGS') {
+ const id = chunk.toolCallId
+ const existing = toolCallMap.get(id)
+ if (existing) {
+ existing.arguments += chunk.delta || ''
+ toolCallMap.set(id, existing)
}
- } else if (chunk.type === 'tool_result') {
+
chunkData.toolCallId = chunk.toolCallId
- chunkData.content = chunk.content
- toolResults.push({
- toolCallId: chunk.toolCallId,
- content: chunk.content,
- })
- reconstructedMessages.push({
- role: 'tool',
- toolCallId: chunk.toolCallId,
- content: chunk.content,
- })
- } else if (chunk.type === 'approval-requested') {
- const approval: ApprovalCapture = {
- toolCallId: chunk.toolCallId,
- toolName: chunk.toolName,
- input: chunk.input,
- approval: chunk.approval,
+ chunkData.delta = chunk.delta
+
+ if (assistantDraft) {
+ const existingToolCall = assistantDraft.toolCalls?.find(
+ (tc: any) => tc.id === id,
+ )
+ if (existingToolCall) {
+ existingToolCall.function.arguments += chunk.delta || ''
+ }
}
+ } else if (chunk.type === 'TOOL_CALL_END') {
chunkData.toolCallId = chunk.toolCallId
- chunkData.toolName = chunk.toolName
- chunkData.input = chunk.input
- chunkData.approval = chunk.approval
+
+ // Capture input/arguments from TOOL_CALL_END (OpenAI sends complete args here)
+ if (chunk.input !== undefined) {
+ const id = chunk.toolCallId
+ const existing = toolCallMap.get(id)
+ if (existing) {
+ existing.arguments = JSON.stringify(chunk.input)
+ toolCallMap.set(id, existing)
+ }
+
+ // Update the assistant draft's tool call arguments
+ if (assistantDraft) {
+ const existingToolCall = assistantDraft.toolCalls?.find(
+ (tc: any) => tc.id === id,
+ )
+ if (existingToolCall) {
+ existingToolCall.function.arguments = JSON.stringify(chunk.input)
+ }
+ }
+ }
+
+ // Tool result is included in TOOL_CALL_END for server-executed tools
+ if (chunk.result !== undefined) {
+ const content =
+ typeof chunk.result === 'string'
+ ? chunk.result
+ : JSON.stringify(chunk.result)
+ toolResults.push({
+ toolCallId: chunk.toolCallId,
+ content,
+ })
+ reconstructedMessages.push({
+ role: 'tool',
+ toolCallId: chunk.toolCallId,
+ content,
+ })
+ }
+ } else if (chunk.type === 'CUSTOM' && chunk.name === 'approval-requested') {
+ const approval: ApprovalCapture = {
+ toolCallId: chunk.value.toolCallId,
+ toolName: chunk.value.toolName,
+ input: chunk.value.input,
+ approval: chunk.value.approval,
+ }
+ chunkData.toolCallId = chunk.value.toolCallId
+ chunkData.toolName = chunk.value.toolName
+ chunkData.input = chunk.value.input
+ chunkData.approval = chunk.value.approval
approvalRequests.push(approval)
- } else if (chunk.type === 'done') {
+ } else if (chunk.type === 'RUN_FINISHED') {
chunkData.finishReason = chunk.finishReason
chunkData.usage = chunk.usage
if (chunk.finishReason === 'stop' && assistantDraft) {
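
The harness rewrite above amounts to a small accumulator over the tool-call lifecycle: `TOOL_CALL_START` registers the call, `TOOL_CALL_ARGS` appends argument deltas, and `TOOL_CALL_END` finalizes the arguments (OpenAI-style adapters deliver complete args only here) and, for server-executed tools, carries the result. A condensed sketch of that state machine, with event fields as they are used in `captureStream`:

```typescript
// Condensed sketch of the harness's tool-call accumulation; event field
// names follow their usage in captureStream above.
interface ToolCallDraft {
  id: string
  name: string
  arguments: string
  result?: string
}

function accumulateToolCall(
  calls: Map<string, ToolCallDraft>,
  chunk:
    | { type: 'TOOL_CALL_START'; toolCallId: string; toolName: string }
    | { type: 'TOOL_CALL_ARGS'; toolCallId: string; delta?: string }
    | { type: 'TOOL_CALL_END'; toolCallId: string; input?: unknown; result?: unknown },
): void {
  const id = chunk.toolCallId
  if (chunk.type === 'TOOL_CALL_START') {
    calls.set(id, { id, name: chunk.toolName, arguments: '' })
  } else if (chunk.type === 'TOOL_CALL_ARGS') {
    const draft = calls.get(id)
    if (draft) draft.arguments += chunk.delta ?? ''
  } else {
    const draft = calls.get(id)
    if (!draft) return
    // Complete args may arrive only at END; prefer them when present.
    if (chunk.input !== undefined) draft.arguments = JSON.stringify(chunk.input)
    if (chunk.result !== undefined)
      draft.result =
        typeof chunk.result === 'string'
          ? chunk.result
          : JSON.stringify(chunk.result)
  }
}
```
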
diff --git a/packages/typescript/smoke-tests/adapters/src/index.ts b/packages/typescript/smoke-tests/adapters/src/index.ts
index 43e19f41..87f2dfd1 100644
--- a/packages/typescript/smoke-tests/adapters/src/index.ts
+++ b/packages/typescript/smoke-tests/adapters/src/index.ts
@@ -1,19 +1,18 @@
import { config } from 'dotenv'
+
import {
- chat,
embedding,
+ maxIterations,
summarize,
toolDefinition,
- maxIterations,
- type Tool,
} from '@tanstack/ai'
import { z } from 'zod'
import { createAnthropic } from '@tanstack/ai-anthropic'
import { createGemini } from '@tanstack/ai-gemini'
import { ollama } from '@tanstack/ai-ollama'
import { createOpenAI } from '@tanstack/ai-openai'
+
import {
- AdapterContext,
buildApprovalMessages,
captureStream,
createDebugEnvelope,
@@ -22,6 +21,10 @@ import {
writeDebugFile,
} from './harness'
+import type { Tool } from '@tanstack/ai'
+
+import type { AdapterContext } from './harness'
+
// Load .env.local first (higher priority), then .env
config({ path: '.env.local' })
config({ path: '.env' })
@@ -38,7 +41,7 @@ const OPENAI_SUMMARY_MODEL = process.env.OPENAI_SUMMARY_MODEL || OPENAI_MODEL
const OPENAI_EMBEDDING_MODEL =
process.env.OPENAI_EMBEDDING_MODEL || 'text-embedding-3-small'
-const GEMINI_MODEL = process.env.GEMINI_MODEL || 'gemini-2.0-flash-lite'
+const GEMINI_MODEL = process.env.GEMINI_MODEL || 'gemini-2.5-flash-lite'
const GEMINI_SUMMARY_MODEL = process.env.GEMINI_SUMMARY_MODEL || GEMINI_MODEL
const GEMINI_EMBEDDING_MODEL =
process.env.GEMINI_EMBEDDING_MODEL || 'gemini-embedding-001'
diff --git a/packages/typescript/smoke-tests/package.json b/packages/typescript/smoke-tests/package.json
index 1146be21..a1d570e7 100644
--- a/packages/typescript/smoke-tests/package.json
+++ b/packages/typescript/smoke-tests/package.json
@@ -1,4 +1,5 @@
{
"name": "smoke-tests",
+ "version": "0.0.0",
"private": true
}
diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml
index 214dfb20..aa55e008 100644
--- a/pnpm-lock.yaml
+++ b/pnpm-lock.yaml
@@ -605,13 +605,13 @@ importers:
partial-json:
specifier: ^0.1.7
version: 0.1.7
+ zod:
+ specifier: ^4.1.13
+ version: 4.1.13
devDependencies:
'@vitest/coverage-v8':
specifier: 4.0.14
version: 4.0.14(vitest@4.0.14(@types/node@24.10.1)(happy-dom@20.0.10)(jiti@2.6.1)(jsdom@27.2.0(postcss@8.5.6))(lightningcss@1.30.2)(terser@5.44.0)(tsx@4.20.6)(yaml@2.8.1))
- zod:
- specifier: ^4.1.13
- version: 4.1.13
packages/typescript/ai-anthropic:
dependencies:
@@ -1133,6 +1133,100 @@ importers:
specifier: ^2.11.10
version: 2.11.10(@testing-library/jest-dom@6.9.1)(solid-js@1.9.10)(vite@7.2.4(@types/node@24.10.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.0)(tsx@4.20.6)(yaml@2.8.1))
+ testing/panel:
+ dependencies:
+ '@alcyone-labs/zod-to-json-schema':
+ specifier: ^4.0.10
+ version: 4.0.10(zod@4.1.13)
+ '@tailwindcss/vite':
+ specifier: ^4.1.17
+ version: 4.1.17(vite@7.2.4(@types/node@24.10.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.0)(tsx@4.20.6)(yaml@2.8.1))
+ '@tanstack/ai':
+ specifier: workspace:*
+ version: link:../../packages/typescript/ai
+ '@tanstack/ai-anthropic':
+ specifier: workspace:*
+ version: link:../../packages/typescript/ai-anthropic
+ '@tanstack/ai-client':
+ specifier: workspace:*
+ version: link:../../packages/typescript/ai-client
+ '@tanstack/ai-gemini':
+ specifier: workspace:*
+ version: link:../../packages/typescript/ai-gemini
+ '@tanstack/ai-openai':
+ specifier: workspace:*
+ version: link:../../packages/typescript/ai-openai
+ '@tanstack/ai-react':
+ specifier: workspace:*
+ version: link:../../packages/typescript/ai-react
+ '@tanstack/ai-react-ui':
+ specifier: workspace:*
+ version: link:../../packages/typescript/ai-react-ui
+ '@tanstack/nitro-v2-vite-plugin':
+ specifier: ^1.139.0
+ version: 1.139.0(rolldown@1.0.0-beta.53)(vite@7.2.4(@types/node@24.10.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.0)(tsx@4.20.6)(yaml@2.8.1))
+ '@tanstack/react-router':
+ specifier: ^1.139.7
+ version: 1.139.7(react-dom@19.2.0(react@19.2.0))(react@19.2.0)
+ '@tanstack/react-start':
+ specifier: ^1.139.8
+ version: 1.139.8(react-dom@19.2.0(react@19.2.0))(react@19.2.0)(vite-plugin-solid@2.11.10(@testing-library/jest-dom@6.9.1)(solid-js@1.9.10)(vite@7.2.4(@types/node@24.10.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.0)(tsx@4.20.6)(yaml@2.8.1)))(vite@7.2.4(@types/node@24.10.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.0)(tsx@4.20.6)(yaml@2.8.1))
+ highlight.js:
+ specifier: ^11.11.1
+ version: 11.11.1
+ lucide-react:
+ specifier: ^0.555.0
+ version: 0.555.0(react@19.2.0)
+ react:
+ specifier: ^19.2.0
+ version: 19.2.0
+ react-dom:
+ specifier: ^19.2.0
+ version: 19.2.0(react@19.2.0)
+ react-markdown:
+ specifier: ^10.1.0
+ version: 10.1.0(@types/react@19.2.7)(react@19.2.0)
+ rehype-highlight:
+ specifier: ^7.0.2
+ version: 7.0.2
+ rehype-raw:
+ specifier: ^7.0.0
+ version: 7.0.0
+ rehype-sanitize:
+ specifier: ^6.0.0
+ version: 6.0.0
+ remark-gfm:
+ specifier: ^4.0.1
+ version: 4.0.1
+ tailwindcss:
+ specifier: ^4.1.17
+ version: 4.1.17
+ vite-tsconfig-paths:
+ specifier: ^5.1.4
+ version: 5.1.4(typescript@5.9.3)(vite@7.2.4(@types/node@24.10.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.0)(tsx@4.20.6)(yaml@2.8.1))
+ zod:
+ specifier: ^4.1.13
+ version: 4.1.13
+ devDependencies:
+ '@types/node':
+ specifier: ^24.10.1
+ version: 24.10.1
+ '@types/react':
+ specifier: ^19.2.7
+ version: 19.2.7
+ '@types/react-dom':
+ specifier: ^19.2.3
+ version: 19.2.3(@types/react@19.2.7)
+ '@vitejs/plugin-react':
+ specifier: ^5.1.1
+ version: 5.1.1(vite@7.2.4(@types/node@24.10.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.0)(tsx@4.20.6)(yaml@2.8.1))
+ typescript:
+ specifier: 5.9.3
+ version: 5.9.3
+ vite:
+ specifier: ^7.2.4
+ version: 7.2.4(@types/node@24.10.1)(jiti@2.6.1)(lightningcss@1.30.2)(terser@5.44.0)(tsx@4.20.6)(yaml@2.8.1)
+
packages:
'@acemir/cssom@0.9.24':
diff --git a/pnpm-workspace.yaml b/pnpm-workspace.yaml
index 9ffeb0fe..47ed9ebd 100644
--- a/pnpm-workspace.yaml
+++ b/pnpm-workspace.yaml
@@ -6,3 +6,4 @@ packages:
- 'packages/typescript/*'
- 'packages/typescript/smoke-tests/*'
- 'examples/*'
+ - 'testing/*'
diff --git a/testing/panel/package.json b/testing/panel/package.json
index bb38ee26..1c887439 100644
--- a/testing/panel/package.json
+++ b/testing/panel/package.json
@@ -8,6 +8,7 @@
"preview": "vite preview"
},
"dependencies": {
+ "@alcyone-labs/zod-to-json-schema": "^4.0.10",
"@tailwindcss/vite": "^4.1.17",
"@tanstack/ai": "workspace:*",
"@tanstack/ai-anthropic": "workspace:*",
@@ -16,22 +17,21 @@
"@tanstack/ai-openai": "workspace:*",
"@tanstack/ai-react": "workspace:*",
"@tanstack/ai-react-ui": "workspace:*",
- "@tanstack/nitro-v2-vite-plugin": "^1.139.7",
+ "@tanstack/nitro-v2-vite-plugin": "^1.139.0",
"@tanstack/react-router": "^1.139.7",
- "@tanstack/react-start": "^1.139.7",
- "@tanstack/start": "^1.139.7",
- "highlight.js": "^11.11.4",
+ "@tanstack/react-start": "^1.139.8",
+ "highlight.js": "^11.11.1",
"lucide-react": "^0.555.0",
"react": "^19.2.0",
"react-dom": "^19.2.0",
- "react-markdown": "^10.0.0",
- "rehype-highlight": "^7.0.1",
+ "react-markdown": "^10.1.0",
+ "rehype-highlight": "^7.0.2",
"rehype-raw": "^7.0.0",
"rehype-sanitize": "^6.0.0",
- "remark-gfm": "^4.0.0",
+ "remark-gfm": "^4.0.1",
"tailwindcss": "^4.1.17",
"vite-tsconfig-paths": "^5.1.4",
- "zod": "^3.25.0"
+ "zod": "^4.1.13"
},
"devDependencies": {
"@types/node": "^24.10.1",