diff --git a/docs/getting-started/quick-start.md b/docs/getting-started/quick-start.md
index 663db1c6..00ee9ead 100644
--- a/docs/getting-started/quick-start.md
+++ b/docs/getting-started/quick-start.md
@@ -20,9 +20,64 @@ yarn add @tanstack/ai @tanstack/ai-react @tanstack/ai-openai
 
 First, create an API route that handles chat requests. Here's a simplified example:
 
+### TanStack Start
+
+```typescript
+import { chat, toStreamResponse } from "@tanstack/ai";
+import { openai } from "@tanstack/ai-openai";
+import { createFileRoute } from "@tanstack/react-router";
+
+export const Route = createFileRoute("/api/chat")({
+  server: {
+    handlers: {
+      POST: async ({ request }) => {
+        // Check for API key
+        if (!process.env.OPENAI_API_KEY) {
+          return new Response(
+            JSON.stringify({
+              error: "OPENAI_API_KEY not configured",
+            }),
+            {
+              status: 500,
+              headers: { "Content-Type": "application/json" },
+            },
+          );
+        }
+
+        const { messages, conversationId } = await request.json();
+
+        try {
+          // Create a streaming chat response
+          const stream = chat({
+            adapter: openai(),
+            messages,
+            model: "gpt-4o",
+            conversationId,
+          });
+
+          // Convert stream to HTTP response
+          return toStreamResponse(stream);
+        } catch (error) {
+          return new Response(
+            JSON.stringify({
+              error:
+                error instanceof Error ? error.message : "An error occurred",
+            }),
+            {
+              status: 500,
+              headers: { "Content-Type": "application/json" },
+            },
+          );
+        }
+      },
+    },
+  },
+});
+```
+
+### Next.js
+
 ```typescript
-// app/api/chat/route.ts (Next.js)
-// or src/routes/api/chat.ts
 import { chat, toStreamResponse } from "@tanstack/ai";
 import { openaiText } from "@tanstack/ai-openai";