All streaming responses in TanStack AI consist of a series of StreamChunks - discrete JSON objects representing different events during the conversation. These chunks enable real-time updates for content generation, tool calls, errors, and completion signals.
This document defines the data structures (chunks) that flow between the TanStack AI server and client during streaming chat operations.
All chunks share a common base structure:
/**
 * Common fields shared by every stream chunk.
 *
 * All chunk variants extend this interface, so consumers can always rely
 * on `type`, `id`, `model`, and `timestamp` being present.
 */
interface BaseStreamChunk {
  type: StreamChunkType; // Discriminant — identifies which variant this chunk is
  id: string; // Unique identifier for the message/response
  model: string; // Model identifier (e.g., "gpt-4o", "claude-3-5-sonnet")
  timestamp: number; // Unix timestamp in milliseconds
}
/**
 * Discriminant values for the `type` field of every stream chunk.
 *
 * NOTE(review): the values mix snake_case ('tool_call') and kebab-case
 * ('tool-input-available'). These are part of the wire protocol, so they
 * cannot be normalized without breaking existing clients.
 */
type StreamChunkType =
  | 'content' // Text content being generated
  | 'thinking' // Model's reasoning process (when supported)
  | 'tool_call' // Model calling a tool/function
  | 'tool-input-available' // Tool inputs are ready for client execution
  | 'approval-requested' // Tool requires user approval
  | 'tool_result' // Result from tool execution
  | 'done' // Stream completion
  | 'error'; // Error occurred
Emitted when the model generates text content. Sent incrementally as tokens are generated.
/**
 * Text content generated by the model, sent incrementally as tokens arrive.
 */
interface ContentStreamChunk extends BaseStreamChunk {
  type: 'content';
  delta: string; // The incremental content token (new text since last chunk)
  content: string; // Full accumulated content so far
  role?: 'assistant'; // Message role; only 'assistant' is emitted when present
}
Example:
{
  "type": "content",
  "id": "chatcmpl-abc123",
  "model": "gpt-4o",
  "timestamp": 1701234567890,
  "delta": "Hello",
  "content": "Hello",
  "role": "assistant"
}
Usage:
Emitted when the model exposes its reasoning process (e.g., Claude with extended thinking, o1 models).
/**
 * The model's exposed reasoning process (e.g., Claude extended thinking,
 * o1-style models). Only emitted when the provider supports it.
 */
interface ThinkingStreamChunk extends BaseStreamChunk {
  type: 'thinking';
  delta?: string; // The incremental thinking token; may be absent on some chunks
  content: string; // Full accumulated thinking content so far
}
Example:
{
  "type": "thinking",
  "id": "chatcmpl-abc123",
  "model": "claude-3-5-sonnet",
  "timestamp": 1701234567890,
  "delta": "First, I need to",
  "content": "First, I need to"
}
Usage:
Emitted when the model decides to call a tool/function.
/**
 * Emitted when the model decides to call a tool/function.
 *
 * `arguments` may arrive incrementally across chunks, so it can be a
 * partial JSON string; wait for completion before parsing.
 */
interface ToolCallStreamChunk extends BaseStreamChunk {
  type: 'tool_call';
  toolCall: {
    id: string; // Provider-assigned tool-call ID (e.g., "call_abc123")
    type: 'function';
    function: {
      name: string; // Tool/function name being invoked
      arguments: string; // JSON string (may be partial/incremental)
    };
  };
  index: number; // Index of this tool call (for parallel calls)
}
Example:
{
  "type": "tool_call",
  "id": "chatcmpl-abc123",
  "model": "gpt-4o",
  "timestamp": 1701234567890,
  "toolCall": {
    "id": "call_abc123",
    "type": "function",
    "function": {
      "name": "get_weather",
      "arguments": "{\"location\":\"San Francisco\"}"
    }
  },
  "index": 0
}
Usage:
Emitted when tool inputs are complete and ready for client-side execution.
/**
 * Signals that a tool call's inputs are complete and ready for
 * client-side execution.
 */
interface ToolInputAvailableStreamChunk extends BaseStreamChunk {
  type: 'tool-input-available';
  toolCallId: string; // ID of the tool call
  toolName: string; // Name of the tool to execute
  // Parsed tool arguments (JSON object).
  // NOTE(review): `any` is kept for wire compatibility; clients should
  // validate this against the tool's input schema before use.
  input: any;
}
Example:
{
  "type": "tool-input-available",
  "id": "chatcmpl-abc123",
  "model": "gpt-4o",
  "timestamp": 1701234567890,
  "toolCallId": "call_abc123",
  "toolName": "get_weather",
  "input": {
    "location": "San Francisco",
    "unit": "fahrenheit"
  }
}
Usage:
Emitted when a tool requires user approval before execution.
/**
 * Emitted when a tool requires explicit user approval before execution.
 * The client should present `input` for review and respond using
 * `approval.id`.
 */
interface ApprovalRequestedStreamChunk extends BaseStreamChunk {
  type: 'approval-requested';
  toolCallId: string; // ID of the tool call
  toolName: string; // Name of the tool requiring approval
  // Tool arguments for review.
  // NOTE(review): `any` is kept for wire compatibility; validate before display.
  input: any;
  approval: {
    id: string; // Unique approval request ID
    needsApproval: true; // Literal true — approval chunks are only sent when required
  };
}
Example:
{
  "type": "approval-requested",
  "id": "chatcmpl-abc123",
  "model": "gpt-4o",
  "timestamp": 1701234567890,
  "toolCallId": "call_abc123",
  "toolName": "send_email",
  "input": {
    "to": "user@example.com",
    "subject": "Hello",
    "body": "Test email"
  },
  "approval": {
    "id": "approval_xyz789",
    "needsApproval": true
  }
}
Usage:
Emitted when a tool execution completes (either server-side or client-side).
/**
 * The result of a completed tool execution (server-side or client-side).
 */
interface ToolResultStreamChunk extends BaseStreamChunk {
  type: 'tool_result';
  toolCallId: string; // ID of the tool call that was executed
  content: string; // Result of the tool execution (JSON stringified)
}
Example:
{
  "type": "tool_result",
  "id": "chatcmpl-abc123",
  "model": "gpt-4o",
  "timestamp": 1701234567891,
  "toolCallId": "call_abc123",
  "content": "{\"temperature\":72,\"conditions\":\"sunny\"}"
}
Usage:
Emitted when the stream completes successfully.
/**
 * Terminal chunk for a successfully completed stream.
 */
interface DoneStreamChunk extends BaseStreamChunk {
  type: 'done';
  // Why the model stopped; null when the provider reports no reason.
  finishReason: 'stop' | 'length' | 'content_filter' | 'tool_calls' | null;
  usage?: {
    promptTokens: number; // Tokens consumed by the prompt
    completionTokens: number; // Tokens generated in the completion
    totalTokens: number; // promptTokens + completionTokens
  };
}
Example:
{
  "type": "done",
  "id": "chatcmpl-abc123",
  "model": "gpt-4o",
  "timestamp": 1701234567892,
  "finishReason": "stop",
  "usage": {
    "promptTokens": 150,
    "completionTokens": 75,
    "totalTokens": 225
  }
}
Finish Reasons:
Usage:
Emitted when an error occurs during streaming.
/**
 * Emitted when an error occurs during streaming. After this chunk the
 * stream may terminate without a 'done' chunk.
 */
interface ErrorStreamChunk extends BaseStreamChunk {
  type: 'error';
  error: {
    message: string; // Human-readable error message
    code?: string; // Optional machine-readable error code (e.g., "rate_limit_exceeded")
  };
}
Example:
{
  "type": "error",
  "id": "chatcmpl-abc123",
  "model": "gpt-4o",
  "timestamp": 1701234567893,
  "error": {
    "message": "Rate limit exceeded",
    "code": "rate_limit_exceeded"
  }
}
Common Error Codes:
Usage:
Content Generation:
ContentStreamChunk (delta: "Hello")
ContentStreamChunk (delta: " world")
ContentStreamChunk (delta: "!")
DoneStreamChunk (finishReason: "stop")
With Thinking:
ThinkingStreamChunk (delta: "I need to...")
ThinkingStreamChunk (delta: " check the weather")
ContentStreamChunk (delta: "Let me check")
DoneStreamChunk (finishReason: "stop")
Tool Usage:
ToolCallStreamChunk (name: "get_weather")
ToolResultStreamChunk (content: "{...}")
ContentStreamChunk (delta: "The weather is...")
DoneStreamChunk (finishReason: "stop")
Client Tool with Approval:
ToolCallStreamChunk (name: "send_email")
ApprovalRequestedStreamChunk (toolName: "send_email")
[User approves]
ToolInputAvailableStreamChunk (toolName: "send_email")
[Client executes]
ToolResultStreamChunk (content: "{\"sent\":true}")
ContentStreamChunk (delta: "Email sent successfully")
DoneStreamChunk (finishReason: "stop")
When the model calls multiple tools in parallel:
ToolCallStreamChunk (index: 0, name: "get_weather")
ToolCallStreamChunk (index: 1, name: "get_time")
ToolResultStreamChunk (toolCallId: "call_1")
ToolResultStreamChunk (toolCallId: "call_2")
ContentStreamChunk (delta: "Based on the data...")
DoneStreamChunk (finishReason: "stop")
All chunks are represented as a discriminated union:
/**
 * Discriminated union of every chunk variant. Narrow on the `type`
 * field to access variant-specific properties type-safely.
 */
type StreamChunk =
  | ContentStreamChunk
  | ThinkingStreamChunk
  | ToolCallStreamChunk
  | ToolInputAvailableStreamChunk
  | ApprovalRequestedStreamChunk
  | ToolResultStreamChunk
  | DoneStreamChunk
  | ErrorStreamChunk;
This enables type-safe handling in TypeScript:
/**
 * Example of type-safe chunk handling: switching on the `type`
 * discriminant narrows `chunk` to the matching variant, so
 * variant-specific fields are accessible without casts.
 */
function handleChunk(chunk: StreamChunk): void {
  switch (chunk.type) {
    case 'content':
      console.log(chunk.delta); // narrowed to ContentStreamChunk
      break;
    case 'thinking':
      console.log(chunk.content); // narrowed to ThinkingStreamChunk
      break;
    case 'tool_call':
      console.log(chunk.toolCall.function.name); // narrowed to ToolCallStreamChunk
      break;
    // ... other cases
  }
}
