diff --git a/.changeset/opentelemetry-instrumentation.md b/.changeset/opentelemetry-instrumentation.md new file mode 100644 index 00000000..bfa11825 --- /dev/null +++ b/.changeset/opentelemetry-instrumentation.md @@ -0,0 +1,7 @@ +--- +"@tanstack/ai-opentelemetry": minor +"@tanstack/ai-openai": patch +"@tanstack/ai-anthropic": patch +--- + +Add `@tanstack/ai-opentelemetry` package for OpenTelemetry instrumentation that bridges aiEventClient events to OTEL spans following GenAI Semantic Conventions. Remove console.log/error from openai and anthropic adapters for security. diff --git a/packages/typescript/ai-anthropic/src/anthropic-adapter.ts b/packages/typescript/ai-anthropic/src/anthropic-adapter.ts index edfcea79..6cef70c1 100644 --- a/packages/typescript/ai-anthropic/src/anthropic-adapter.ts +++ b/packages/typescript/ai-anthropic/src/anthropic-adapter.ts @@ -99,16 +99,6 @@ export class Anthropic extends BaseAdapter< this.generateId(), ) } catch (error: any) { - console.error('[Anthropic Adapter] Error in chatStream:', { - message: error?.message, - status: error?.status, - statusText: error?.statusText, - code: error?.code, - type: error?.type, - error: error, - stack: error?.stack, - }) - // Emit an error chunk yield { type: 'error', @@ -613,16 +603,6 @@ export class Anthropic extends BaseAdapter< } } } catch (error: any) { - console.error('[Anthropic Adapter] Error in processAnthropicStream:', { - message: error?.message, - status: error?.status, - statusText: error?.statusText, - code: error?.code, - type: error?.type, - error: error, - stack: error?.stack, - }) - yield { type: 'error', id: generateId(), diff --git a/packages/typescript/ai-openai/src/openai-adapter.ts b/packages/typescript/ai-openai/src/openai-adapter.ts index 676d5257..5f1719eb 100644 --- a/packages/typescript/ai-openai/src/openai-adapter.ts +++ b/packages/typescript/ai-openai/src/openai-adapter.ts @@ -116,10 +116,6 @@ export class OpenAI extends BaseAdapter< () => this.generateId(), ) } catch 
(error: any) { - console.error('>>> chatStream: Fatal error during response creation <<<') - console.error('>>> Error message:', error?.message) - console.error('>>> Error stack:', error?.stack) - console.error('>>> Full error:', error) throw error } } @@ -215,8 +211,6 @@ export class OpenAI extends BaseAdapter< let responseId: string | null = null let model: string = options.model - const eventTypeCounts = new Map() - try { for await (const chunk of stream) { chunkCount++ @@ -466,14 +460,6 @@ export class OpenAI extends BaseAdapter< } } } catch (error: any) { - console.log( - '[OpenAI Adapter] Stream ended with error. Event type summary:', - { - totalChunks: chunkCount, - eventTypes: Object.fromEntries(eventTypeCounts), - error: error.message, - }, - ) yield { type: 'error', id: generateId(), diff --git a/packages/typescript/ai-opentelemetry/README.md b/packages/typescript/ai-opentelemetry/README.md new file mode 100644 index 00000000..56eee7cb --- /dev/null +++ b/packages/typescript/ai-opentelemetry/README.md @@ -0,0 +1,122 @@ +# @tanstack/ai-opentelemetry + +OpenTelemetry instrumentation for TanStack AI. Automatically creates traces and spans for AI operations following [GenAI Semantic Conventions](https://opentelemetry.io/docs/specs/semconv/gen-ai/). + +## Installation + +```bash +npm install @tanstack/ai-opentelemetry @opentelemetry/api +``` + +## Quick Start + +```typescript +import { enableOpenTelemetry } from '@tanstack/ai-opentelemetry' + +// Enable instrumentation (uses global tracer provider) +enableOpenTelemetry() + +// Now all TanStack AI operations will be traced +import { chat } from '@tanstack/ai' +import { openai } from '@tanstack/ai-openai' + +const stream = chat({ + adapter: openai(), + model: 'gpt-4o', + messages: [{ role: 'user', content: 'Hello!' 
}], +}) +``` + +## Configuration + +```typescript +import { enableOpenTelemetry } from '@tanstack/ai-opentelemetry' +import { trace } from '@opentelemetry/api' + +enableOpenTelemetry({ + // Provide custom tracer + tracer: trace.getTracer('my-app', '1.0.0'), + + // Record prompt/response content (disabled by default for privacy) + recordContent: true, + + // Record tool call arguments and results (enabled by default) + recordToolCalls: true, +}) +``` + +## Spans Created + +The instrumentation creates the following spans: + +### Chat Span +- **Name**: `chat {model}` +- **Kind**: CLIENT +- **Attributes**: + - `gen_ai.system`: Provider name (openai, anthropic, etc.) + - `gen_ai.request.model`: Requested model + - `gen_ai.response.model`: Actual model used + - `gen_ai.response.finish_reasons`: How the response ended + - `gen_ai.usage.input_tokens`: Prompt tokens + - `gen_ai.usage.output_tokens`: Completion tokens + - `gen_ai.usage.total_tokens`: Total tokens + +### Stream Span +- **Name**: `stream {model}` +- **Kind**: INTERNAL +- **Attributes**: + - `tanstack_ai.stream.total_chunks`: Number of chunks received + - `tanstack_ai.stream.duration_ms`: Stream duration + +### Tool Span +- **Name**: `tool {toolName}` +- **Kind**: INTERNAL +- **Attributes**: + - `gen_ai.tool.name`: Tool name + - `gen_ai.tool.call_id`: Unique tool call ID + - `gen_ai.tool.duration_ms`: Execution duration + +## Example with Jaeger + +```typescript +import { NodeSDK } from '@opentelemetry/sdk-node' +import { JaegerExporter } from '@opentelemetry/exporter-jaeger' +import { enableOpenTelemetry } from '@tanstack/ai-opentelemetry' + +// Setup OpenTelemetry SDK +const sdk = new NodeSDK({ + traceExporter: new JaegerExporter({ + endpoint: 'http://localhost:14268/api/traces', + }), + serviceName: 'my-ai-app', +}) +sdk.start() + +// Enable TanStack AI instrumentation +enableOpenTelemetry() +``` + +## Disabling Instrumentation + +```typescript +import { disableOpenTelemetry } from 
'@tanstack/ai-opentelemetry' + +disableOpenTelemetry() +``` + +## Manual Instrumentation + +For more control, you can create and manage the instrumentation instance directly: + +```typescript +import { TanStackAIInstrumentation } from '@tanstack/ai-opentelemetry' + +const instrumentation = new TanStackAIInstrumentation({ + recordContent: true, +}) + +instrumentation.enable() + +// Later... +instrumentation.disable() +``` diff --git a/packages/typescript/ai-opentelemetry/package.json b/packages/typescript/ai-opentelemetry/package.json new file mode 100644 index 00000000..8789edec --- /dev/null +++ b/packages/typescript/ai-opentelemetry/package.json @@ -0,0 +1,54 @@ +{ + "name": "@tanstack/ai-opentelemetry", + "version": "0.0.3", + "description": "OpenTelemetry instrumentation for TanStack AI", + "author": "", + "license": "MIT", + "repository": { + "type": "git", + "url": "git+https://github.com/TanStack/ai.git", + "directory": "packages/typescript/ai-opentelemetry" + }, + "keywords": [ + "ai", + "opentelemetry", + "tracing", + "observability", + "tanstack", + "instrumentation" + ], + "type": "module", + "module": "./dist/esm/index.js", + "types": "./dist/esm/index.d.ts", + "exports": { + ".": { + "types": "./dist/esm/index.d.ts", + "import": "./dist/esm/index.js" + } + }, + "files": [ + "dist", + "src" + ], + "scripts": { + "build": "vite build", + "clean": "premove ./build ./dist", + "lint:fix": "eslint ./src --fix", + "test:build": "publint --strict", + "test:eslint": "eslint ./src", + "test:lib": "vitest", + "test:lib:dev": "pnpm test:lib --watch", + "test:types": "tsc" + }, + "dependencies": { + "@opentelemetry/api": "^1.9.0", + "@opentelemetry/semantic-conventions": "^1.28.0" + }, + "devDependencies": { + "@tanstack/ai": "workspace:*", + "@vitest/coverage-v8": "4.0.14" + }, + "peerDependencies": { + "@tanstack/ai": "workspace:*" + } +} diff --git a/packages/typescript/ai-opentelemetry/src/index.ts b/packages/typescript/ai-opentelemetry/src/index.ts new file 
mode 100644 index 00000000..bc330f1c --- /dev/null +++ b/packages/typescript/ai-opentelemetry/src/index.ts @@ -0,0 +1,10 @@ +export { + TanStackAIInstrumentation, + enableOpenTelemetry, + disableOpenTelemetry, +} from './instrumentation' + +export { + GenAIAttributes, + type TanStackAIInstrumentationConfig, +} from './types' diff --git a/packages/typescript/ai-opentelemetry/src/instrumentation.ts b/packages/typescript/ai-opentelemetry/src/instrumentation.ts new file mode 100644 index 00000000..521754af --- /dev/null +++ b/packages/typescript/ai-opentelemetry/src/instrumentation.ts @@ -0,0 +1,510 @@ +import { + trace, + context, + SpanKind, + SpanStatusCode, + type Span, + type Tracer, +} from '@opentelemetry/api' +import { aiEventClient } from '@tanstack/ai/event-client' +import { GenAIAttributes, type TanStackAIInstrumentationConfig } from './types' + +const VERSION = '0.0.3' + +/** + * Manages active spans for correlation across events + */ +class SpanRegistry { + private chat_spans = new Map() + private stream_spans = new Map() + private tool_spans = new Map() + + setChatSpan(request_id: string, span: Span): void { + this.chat_spans.set(request_id, span) + } + + getChatSpan(request_id: string): Span | undefined { + return this.chat_spans.get(request_id) + } + + deleteChatSpan(request_id: string): void { + this.chat_spans.delete(request_id) + } + + setStreamSpan(stream_id: string, span: Span): void { + this.stream_spans.set(stream_id, span) + } + + getStreamSpan(stream_id: string): Span | undefined { + return this.stream_spans.get(stream_id) + } + + deleteStreamSpan(stream_id: string): void { + this.stream_spans.delete(stream_id) + } + + setToolSpan(tool_call_id: string, span: Span): void { + this.tool_spans.set(tool_call_id, span) + } + + getToolSpan(tool_call_id: string): Span | undefined { + return this.tool_spans.get(tool_call_id) + } + + deleteToolSpan(tool_call_id: string): void { + this.tool_spans.delete(tool_call_id) + } +} + +/** + * TanStack AI 
OpenTelemetry Instrumentation + * + * Subscribes to aiEventClient events and creates OpenTelemetry spans + * following GenAI semantic conventions. + */ +export class TanStackAIInstrumentation { + private tracer: Tracer + private config: TanStackAIInstrumentationConfig + private registry = new SpanRegistry() + private cleanup_functions: Array<() => void> = [] + private enabled = false + + constructor(config: TanStackAIInstrumentationConfig = {}) { + this.config = { + tracerName: '@tanstack/ai', + tracerVersion: VERSION, + recordContent: false, + recordToolCalls: true, + attributePrefix: 'gen_ai', + ...config, + } + + this.tracer = + config.tracer ?? + trace.getTracer( + this.config.tracerName!, + this.config.tracerVersion, + ) + } + + /** + * Enable the instrumentation and start listening to events + */ + enable(): void { + if (this.enabled) { + return + } + + this.enabled = true + this.subscribeToEvents() + } + + /** + * Disable the instrumentation and stop listening to events + */ + disable(): void { + if (!this.enabled) { + return + } + + this.enabled = false + for (const cleanup of this.cleanup_functions) { + cleanup() + } + this.cleanup_functions = [] + } + + private subscribeToEvents(): void { + // Chat lifecycle events + this.cleanup_functions.push( + aiEventClient.on( + 'chat:started', + (event) => this.onChatStarted(event.payload), + { withEventTarget: true }, + ), + ) + + this.cleanup_functions.push( + aiEventClient.on( + 'chat:completed', + (event) => this.onChatCompleted(event.payload), + { withEventTarget: true }, + ), + ) + + this.cleanup_functions.push( + aiEventClient.on( + 'chat:iteration', + (event) => this.onChatIteration(event.payload), + { withEventTarget: true }, + ), + ) + + // Stream events + this.cleanup_functions.push( + aiEventClient.on( + 'stream:started', + (event) => this.onStreamStarted(event.payload), + { withEventTarget: true }, + ), + ) + + this.cleanup_functions.push( + aiEventClient.on( + 'stream:ended', + (event) => 
this.onStreamEnded(event.payload), + { withEventTarget: true }, + ), + ) + + this.cleanup_functions.push( + aiEventClient.on( + 'stream:chunk:error', + (event) => this.onStreamError(event.payload), + { withEventTarget: true }, + ), + ) + + // Tool events + this.cleanup_functions.push( + aiEventClient.on( + 'stream:chunk:tool-call', + (event) => this.onToolCallStarted(event.payload), + { withEventTarget: true }, + ), + ) + + this.cleanup_functions.push( + aiEventClient.on( + 'tool:call-completed', + (event) => this.onToolCallCompleted(event.payload), + { withEventTarget: true }, + ), + ) + + // Usage events + this.cleanup_functions.push( + aiEventClient.on( + 'usage:tokens', + (event) => this.onUsageTokens(event.payload), + { withEventTarget: true }, + ), + ) + } + + private onChatStarted(event: { + requestId: string + streamId: string + provider: string + model: string + messageCount: number + hasTools: boolean + streaming: boolean + timestamp: number + clientId?: string + toolNames?: Array + }): void { + const span = this.tracer.startSpan( + `chat ${event.model}`, + { + kind: SpanKind.CLIENT, + startTime: new Date(event.timestamp), + attributes: { + [GenAIAttributes.SYSTEM]: event.provider, + [GenAIAttributes.REQUEST_MODEL]: event.model, + [GenAIAttributes.TANSTACK_REQUEST_ID]: event.requestId, + [GenAIAttributes.TANSTACK_STREAM_ID]: event.streamId, + [GenAIAttributes.TANSTACK_MESSAGE_COUNT]: event.messageCount, + [GenAIAttributes.TANSTACK_HAS_TOOLS]: event.hasTools, + [GenAIAttributes.TANSTACK_STREAMING]: event.streaming, + ...(event.clientId && { + [GenAIAttributes.TANSTACK_CLIENT_ID]: event.clientId, + }), + ...(event.toolNames && { + 'gen_ai.request.tool_names': event.toolNames.join(','), + }), + }, + }, + ) + + this.registry.setChatSpan(event.requestId, span) + } + + private onChatCompleted(event: { + requestId: string + streamId: string + model: string + content: string + messageId?: string + finishReason?: string + usage?: { + promptTokens: number + 
completionTokens: number + totalTokens: number + } + timestamp: number + }): void { + const span = this.registry.getChatSpan(event.requestId) + if (!span) { + return + } + + span.setAttributes({ + [GenAIAttributes.RESPONSE_MODEL]: event.model, + ...(event.finishReason && { + [GenAIAttributes.RESPONSE_FINISH_REASONS]: event.finishReason, + }), + ...(event.usage && { + [GenAIAttributes.USAGE_INPUT_TOKENS]: event.usage.promptTokens, + [GenAIAttributes.USAGE_OUTPUT_TOKENS]: event.usage.completionTokens, + [GenAIAttributes.USAGE_TOTAL_TOKENS]: event.usage.totalTokens, + }), + }) + + if (this.config.recordContent && event.content) { + span.setAttribute('gen_ai.response.content', event.content) + } + + span.setStatus({ code: SpanStatusCode.OK }) + span.end(new Date(event.timestamp)) + this.registry.deleteChatSpan(event.requestId) + } + + private onChatIteration(event: { + requestId: string + streamId: string + iterationNumber: number + messageCount: number + toolCallCount: number + timestamp: number + }): void { + const parent_span = this.registry.getChatSpan(event.requestId) + if (!parent_span) { + return + } + + // Create a child span for this iteration + const parent_context = trace.setSpan(context.active(), parent_span) + const iteration_span = this.tracer.startSpan( + `iteration ${event.iterationNumber}`, + { + kind: SpanKind.INTERNAL, + startTime: new Date(event.timestamp), + attributes: { + [GenAIAttributes.TANSTACK_ITERATION]: event.iterationNumber, + [GenAIAttributes.TANSTACK_MESSAGE_COUNT]: event.messageCount, + [GenAIAttributes.TANSTACK_TOOL_COUNT]: event.toolCallCount, + }, + }, + parent_context, + ) + + // End iteration span immediately (it's a point-in-time event) + iteration_span.end(new Date(event.timestamp)) + } + + private onStreamStarted(event: { + streamId: string + model: string + provider: string + timestamp: number + }): void { + const span = this.tracer.startSpan( + `stream ${event.model}`, + { + kind: SpanKind.INTERNAL, + startTime: new 
Date(event.timestamp), + attributes: { + [GenAIAttributes.SYSTEM]: event.provider, + [GenAIAttributes.REQUEST_MODEL]: event.model, + [GenAIAttributes.TANSTACK_STREAM_ID]: event.streamId, + }, + }, + ) + + this.registry.setStreamSpan(event.streamId, span) + } + + private onStreamEnded(event: { + requestId: string + streamId: string + totalChunks: number + duration: number + timestamp: number + }): void { + const span = this.registry.getStreamSpan(event.streamId) + if (!span) { + return + } + + span.setAttributes({ + 'tanstack_ai.stream.total_chunks': event.totalChunks, + 'tanstack_ai.stream.duration_ms': event.duration, + }) + + span.setStatus({ code: SpanStatusCode.OK }) + span.end(new Date(event.timestamp)) + this.registry.deleteStreamSpan(event.streamId) + } + + private onStreamError(event: { + streamId: string + messageId?: string + error: string + timestamp: number + }): void { + const span = this.registry.getStreamSpan(event.streamId) + if (!span) { + return + } + + span.setStatus({ + code: SpanStatusCode.ERROR, + message: event.error, + }) + span.recordException(new Error(event.error)) + } + + private onToolCallStarted(event: { + streamId: string + messageId?: string + toolCallId: string + toolName: string + index: number + arguments: string + timestamp: number + }): void { + if (!this.config.recordToolCalls) { + return + } + + const parent_span = this.registry.getStreamSpan(event.streamId) + const parent_context = parent_span + ? 
trace.setSpan(context.active(), parent_span) + : context.active() + + const span = this.tracer.startSpan( + `tool ${event.toolName}`, + { + kind: SpanKind.INTERNAL, + startTime: new Date(event.timestamp), + attributes: { + [GenAIAttributes.TOOL_NAME]: event.toolName, + [GenAIAttributes.TOOL_CALL_ID]: event.toolCallId, + 'gen_ai.tool.index': event.index, + }, + }, + parent_context, + ) + + if (this.config.recordContent && event.arguments) { + span.setAttribute('gen_ai.tool.arguments', event.arguments) + } + + this.registry.setToolSpan(event.toolCallId, span) + } + + private onToolCallCompleted(event: { + requestId: string + streamId: string + messageId?: string + toolCallId: string + toolName: string + result: unknown + duration: number + timestamp: number + }): void { + if (!this.config.recordToolCalls) { + return + } + + const span = this.registry.getToolSpan(event.toolCallId) + if (!span) { + return + } + + span.setAttributes({ + 'gen_ai.tool.duration_ms': event.duration, + }) + + if (this.config.recordContent && event.result !== undefined) { + span.setAttribute( + 'gen_ai.tool.result', + typeof event.result === 'string' + ? 
event.result + : JSON.stringify(event.result), + ) + } + + span.setStatus({ code: SpanStatusCode.OK }) + span.end(new Date(event.timestamp)) + this.registry.deleteToolSpan(event.toolCallId) + } + + private onUsageTokens(event: { + requestId: string + streamId: string + model: string + messageId?: string + usage: { + promptTokens: number + completionTokens: number + totalTokens: number + } + timestamp: number + }): void { + const span = this.registry.getChatSpan(event.requestId) + if (!span) { + return + } + + span.setAttributes({ + [GenAIAttributes.USAGE_INPUT_TOKENS]: event.usage.promptTokens, + [GenAIAttributes.USAGE_OUTPUT_TOKENS]: event.usage.completionTokens, + [GenAIAttributes.USAGE_TOTAL_TOKENS]: event.usage.totalTokens, + }) + } +} + +// Singleton instance for convenience +let default_instrumentation: TanStackAIInstrumentation | null = null + +/** + * Enable OpenTelemetry instrumentation for TanStack AI + * + * @example + * ```typescript + * import { enableOpenTelemetry } from '@tanstack/ai-opentelemetry' + * + * // Use global tracer provider + * enableOpenTelemetry() + * + * // Or provide custom tracer + * enableOpenTelemetry({ + * tracer: myTracer, + * recordContent: true, // Record prompt/response content + * }) + * ``` + */ +export function enableOpenTelemetry( + config?: TanStackAIInstrumentationConfig, +): TanStackAIInstrumentation { + if (default_instrumentation) { + default_instrumentation.disable() + } + + default_instrumentation = new TanStackAIInstrumentation(config) + default_instrumentation.enable() + + return default_instrumentation +} + +/** + * Disable the default OpenTelemetry instrumentation + */ +export function disableOpenTelemetry(): void { + if (default_instrumentation) { + default_instrumentation.disable() + default_instrumentation = null + } +} diff --git a/packages/typescript/ai-opentelemetry/src/types.ts b/packages/typescript/ai-opentelemetry/src/types.ts new file mode 100644 index 00000000..8a0ebf81 --- /dev/null +++ 
b/packages/typescript/ai-opentelemetry/src/types.ts @@ -0,0 +1,83 @@ +import type { Tracer } from '@opentelemetry/api' + +/** + * Configuration options for the TanStack AI OpenTelemetry instrumentation + */ +export interface TanStackAIInstrumentationConfig { + /** + * OpenTelemetry tracer instance to use for creating spans + * If not provided, will use the global tracer provider + */ + tracer?: Tracer + + /** + * Name for the tracer (used when tracer is not provided) + * @default '@tanstack/ai' + */ + tracerName?: string + + /** + * Version for the tracer (used when tracer is not provided) + * @default package version + */ + tracerVersion?: string + + /** + * Whether to record prompt/response content in span attributes + * Disable this in production if content may contain sensitive data + * @default false + */ + recordContent?: boolean + + /** + * Whether to record tool call arguments and results + * @default true + */ + recordToolCalls?: boolean + + /** + * Custom attribute prefix for all span attributes + * @default 'gen_ai' + */ + attributePrefix?: string +} + +/** + * GenAI Semantic Conventions for OpenTelemetry + * Based on https://opentelemetry.io/docs/specs/semconv/gen-ai/ + */ +export const GenAIAttributes = { + // System attributes + SYSTEM: 'gen_ai.system', + REQUEST_MODEL: 'gen_ai.request.model', + RESPONSE_MODEL: 'gen_ai.response.model', + + // Request attributes + REQUEST_MAX_TOKENS: 'gen_ai.request.max_tokens', + REQUEST_TEMPERATURE: 'gen_ai.request.temperature', + REQUEST_TOP_P: 'gen_ai.request.top_p', + REQUEST_STOP_SEQUENCES: 'gen_ai.request.stop_sequences', + + // Response attributes + RESPONSE_ID: 'gen_ai.response.id', + RESPONSE_FINISH_REASONS: 'gen_ai.response.finish_reasons', + + // Usage attributes + USAGE_INPUT_TOKENS: 'gen_ai.usage.input_tokens', + USAGE_OUTPUT_TOKENS: 'gen_ai.usage.output_tokens', + USAGE_TOTAL_TOKENS: 'gen_ai.usage.total_tokens', + + // Tool attributes + TOOL_NAME: 'gen_ai.tool.name', + TOOL_CALL_ID: 
'gen_ai.tool.call_id', + + // TanStack AI specific attributes + TANSTACK_REQUEST_ID: 'tanstack_ai.request_id', + TANSTACK_STREAM_ID: 'tanstack_ai.stream_id', + TANSTACK_CLIENT_ID: 'tanstack_ai.client_id', + TANSTACK_ITERATION: 'tanstack_ai.iteration', + TANSTACK_MESSAGE_COUNT: 'tanstack_ai.message_count', + TANSTACK_TOOL_COUNT: 'tanstack_ai.tool_count', + TANSTACK_HAS_TOOLS: 'tanstack_ai.has_tools', + TANSTACK_STREAMING: 'tanstack_ai.streaming', +} as const diff --git a/packages/typescript/ai-opentelemetry/tests/instrumentation.test.ts b/packages/typescript/ai-opentelemetry/tests/instrumentation.test.ts new file mode 100644 index 00000000..18a02431 --- /dev/null +++ b/packages/typescript/ai-opentelemetry/tests/instrumentation.test.ts @@ -0,0 +1,451 @@ +import { describe, it, expect, beforeEach, afterEach, vi } from 'vitest' +import { + trace, + context, + SpanKind, + SpanStatusCode, + type Span, + type Tracer, +} from '@opentelemetry/api' +import { aiEventClient } from '@tanstack/ai/event-client' +import { + TanStackAIInstrumentation, + enableOpenTelemetry, + disableOpenTelemetry, + GenAIAttributes, +} from '../src' + +// Mock span +function createMockSpan(): Span & { + attributes: Record + status: { code: SpanStatusCode; message?: string } + ended: boolean + endTime?: Date + exceptions: Array +} { + const mock_span = { + attributes: {} as Record, + status: { code: SpanStatusCode.UNSET } as { code: SpanStatusCode; message?: string }, + ended: false, + endTime: undefined as Date | undefined, + exceptions: [] as Array, + + setAttribute(key: string, value: unknown) { + this.attributes[key] = value + return this + }, + setAttributes(attrs: Record) { + Object.assign(this.attributes, attrs) + return this + }, + setStatus(status: { code: SpanStatusCode; message?: string }) { + this.status = status + return this + }, + end(time?: Date) { + this.ended = true + this.endTime = time + }, + recordException(error: Error) { + this.exceptions.push(error) + }, + addEvent: vi.fn(), 
+ isRecording: () => true, + updateName: vi.fn(), + spanContext: () => ({ + traceId: 'test-trace-id', + spanId: 'test-span-id', + traceFlags: 1, + }), + addLink: vi.fn(), + addLinks: vi.fn(), + } + return mock_span +} + +// Mock tracer +function createMockTracer(): Tracer & { spans: Array> } { + const spans: Array> = [] + + return { + spans, + startSpan(name: string, options?: any) { + const span = createMockSpan() + span.setAttribute('span.name', name) + if (options?.attributes) { + span.setAttributes(options.attributes) + } + spans.push(span) + return span + }, + startActiveSpan: vi.fn(), + } +} + +describe('TanStackAIInstrumentation', () => { + let instrumentation: TanStackAIInstrumentation | null = null + let mock_tracer: ReturnType + + beforeEach(() => { + // Always start with clean state + disableOpenTelemetry() + mock_tracer = createMockTracer() + instrumentation = new TanStackAIInstrumentation({ + tracer: mock_tracer, + recordContent: true, + recordToolCalls: true, + }) + }) + + afterEach(() => { + if (instrumentation) { + instrumentation.disable() + instrumentation = null + } + disableOpenTelemetry() + }) + + describe('enable/disable', () => { + it('should not create spans when disabled', () => { + // Don't enable instrumentation + aiEventClient.emit('chat:started', { + requestId: 'req-1', + streamId: 'stream-1', + provider: 'openai', + model: 'gpt-4o', + messageCount: 1, + hasTools: false, + streaming: true, + timestamp: Date.now(), + }) + + expect(mock_tracer.spans.length).toBe(0) + }) + + it('should create spans when enabled', () => { + instrumentation.enable() + + aiEventClient.emit('chat:started', { + requestId: 'req-1', + streamId: 'stream-1', + provider: 'openai', + model: 'gpt-4o', + messageCount: 1, + hasTools: false, + streaming: true, + timestamp: Date.now(), + }) + + expect(mock_tracer.spans.length).toBe(1) + }) + + it('should be idempotent when enabling multiple times', () => { + instrumentation!.enable() + instrumentation!.enable() // Should 
not throw or double-subscribe + + // Just one span should be created + aiEventClient.emit('chat:started', { + requestId: 'req-idempotent', + streamId: 'stream-idempotent', + provider: 'openai', + model: 'gpt-4o', + messageCount: 1, + hasTools: false, + streaming: true, + timestamp: Date.now(), + }) + + // Only one span from our instrumentation + expect(mock_tracer.spans.length).toBe(1) + }) + }) + + describe('chat:started event', () => { + beforeEach(() => { + instrumentation.enable() + }) + + it('should create a chat span with correct attributes', () => { + const timestamp = Date.now() + + aiEventClient.emit('chat:started', { + requestId: 'req-123', + streamId: 'stream-456', + provider: 'openai', + model: 'gpt-4o', + messageCount: 3, + hasTools: true, + streaming: true, + timestamp, + clientId: 'client-789', + toolNames: ['get_weather', 'search'], + }) + + expect(mock_tracer.spans.length).toBe(1) + const span = mock_tracer.spans[0]! + + expect(span.attributes['span.name']).toBe('chat gpt-4o') + expect(span.attributes[GenAIAttributes.SYSTEM]).toBe('openai') + expect(span.attributes[GenAIAttributes.REQUEST_MODEL]).toBe('gpt-4o') + expect(span.attributes[GenAIAttributes.TANSTACK_REQUEST_ID]).toBe('req-123') + expect(span.attributes[GenAIAttributes.TANSTACK_STREAM_ID]).toBe('stream-456') + expect(span.attributes[GenAIAttributes.TANSTACK_MESSAGE_COUNT]).toBe(3) + expect(span.attributes[GenAIAttributes.TANSTACK_HAS_TOOLS]).toBe(true) + expect(span.attributes[GenAIAttributes.TANSTACK_STREAMING]).toBe(true) + expect(span.attributes[GenAIAttributes.TANSTACK_CLIENT_ID]).toBe('client-789') + expect(span.attributes['gen_ai.request.tool_names']).toBe('get_weather,search') + }) + }) + + describe('chat:completed event', () => { + beforeEach(() => { + instrumentation.enable() + }) + + it('should end the chat span with usage data', () => { + const start_time = Date.now() + const end_time = start_time + 1000 + + // Start chat + aiEventClient.emit('chat:started', { + requestId: 
'req-123', + streamId: 'stream-456', + provider: 'openai', + model: 'gpt-4o', + messageCount: 1, + hasTools: false, + streaming: true, + timestamp: start_time, + }) + + // Complete chat + aiEventClient.emit('chat:completed', { + requestId: 'req-123', + streamId: 'stream-456', + model: 'gpt-4o', + content: 'Hello! How can I help you?', + finishReason: 'stop', + usage: { + promptTokens: 10, + completionTokens: 20, + totalTokens: 30, + }, + timestamp: end_time, + }) + + const span = mock_tracer.spans[0]! + + expect(span.attributes[GenAIAttributes.RESPONSE_MODEL]).toBe('gpt-4o') + expect(span.attributes[GenAIAttributes.RESPONSE_FINISH_REASONS]).toBe('stop') + expect(span.attributes[GenAIAttributes.USAGE_INPUT_TOKENS]).toBe(10) + expect(span.attributes[GenAIAttributes.USAGE_OUTPUT_TOKENS]).toBe(20) + expect(span.attributes[GenAIAttributes.USAGE_TOTAL_TOKENS]).toBe(30) + expect(span.attributes['gen_ai.response.content']).toBe('Hello! How can I help you?') + expect(span.status.code).toBe(SpanStatusCode.OK) + expect(span.ended).toBe(true) + }) + }) + + describe('stream events', () => { + beforeEach(() => { + instrumentation.enable() + }) + + it('should create and end stream span', () => { + const start_time = Date.now() + const end_time = start_time + 500 + + aiEventClient.emit('stream:started', { + streamId: 'stream-123', + model: 'gpt-4o', + provider: 'openai', + timestamp: start_time, + }) + + aiEventClient.emit('stream:ended', { + requestId: 'req-123', + streamId: 'stream-123', + totalChunks: 42, + duration: 500, + timestamp: end_time, + }) + + expect(mock_tracer.spans.length).toBe(1) + const span = mock_tracer.spans[0]! 
+ + expect(span.attributes['span.name']).toBe('stream gpt-4o') + expect(span.attributes['tanstack_ai.stream.total_chunks']).toBe(42) + expect(span.attributes['tanstack_ai.stream.duration_ms']).toBe(500) + expect(span.status.code).toBe(SpanStatusCode.OK) + expect(span.ended).toBe(true) + }) + + it('should record error on stream:chunk:error', () => { + aiEventClient.emit('stream:started', { + streamId: 'stream-123', + model: 'gpt-4o', + provider: 'openai', + timestamp: Date.now(), + }) + + aiEventClient.emit('stream:chunk:error', { + streamId: 'stream-123', + error: 'Rate limit exceeded', + timestamp: Date.now(), + }) + + const span = mock_tracer.spans[0]! + + expect(span.status.code).toBe(SpanStatusCode.ERROR) + expect(span.status.message).toBe('Rate limit exceeded') + expect(span.exceptions.length).toBe(1) + expect(span.exceptions[0]!.message).toBe('Rate limit exceeded') + }) + }) + + describe('tool events', () => { + beforeEach(() => { + instrumentation.enable() + }) + + it('should create tool span on tool call', () => { + // First create a stream span as parent + aiEventClient.emit('stream:started', { + streamId: 'stream-123', + model: 'gpt-4o', + provider: 'openai', + timestamp: Date.now(), + }) + + aiEventClient.emit('stream:chunk:tool-call', { + streamId: 'stream-123', + toolCallId: 'tool-call-1', + toolName: 'get_weather', + index: 0, + arguments: '{"location": "Tokyo"}', + timestamp: Date.now(), + }) + + // Should have stream span + tool span + expect(mock_tracer.spans.length).toBe(2) + const tool_span = mock_tracer.spans[1]! 
+ + expect(tool_span.attributes['span.name']).toBe('tool get_weather') + expect(tool_span.attributes[GenAIAttributes.TOOL_NAME]).toBe('get_weather') + expect(tool_span.attributes[GenAIAttributes.TOOL_CALL_ID]).toBe('tool-call-1') + expect(tool_span.attributes['gen_ai.tool.index']).toBe(0) + expect(tool_span.attributes['gen_ai.tool.arguments']).toBe('{"location": "Tokyo"}') + }) + + it('should end tool span on completion', () => { + aiEventClient.emit('stream:started', { + streamId: 'stream-123', + model: 'gpt-4o', + provider: 'openai', + timestamp: Date.now(), + }) + + aiEventClient.emit('stream:chunk:tool-call', { + streamId: 'stream-123', + toolCallId: 'tool-call-1', + toolName: 'get_weather', + index: 0, + arguments: '{"location": "Tokyo"}', + timestamp: Date.now(), + }) + + aiEventClient.emit('tool:call-completed', { + requestId: 'req-123', + streamId: 'stream-123', + toolCallId: 'tool-call-1', + toolName: 'get_weather', + result: { temperature: 25, condition: 'sunny' }, + duration: 150, + timestamp: Date.now(), + }) + + const tool_span = mock_tracer.spans[1]! 
+ + expect(tool_span.attributes['gen_ai.tool.duration_ms']).toBe(150) + expect(tool_span.attributes['gen_ai.tool.result']).toBe( + '{"temperature":25,"condition":"sunny"}', + ) + expect(tool_span.status.code).toBe(SpanStatusCode.OK) + expect(tool_span.ended).toBe(true) + }) + }) + + describe('recordContent option', () => { + it('should not record content when recordContent is false', () => { + const no_content_instrumentation = new TanStackAIInstrumentation({ + tracer: mock_tracer, + recordContent: false, + }) + no_content_instrumentation.enable() + + aiEventClient.emit('chat:started', { + requestId: 'req-123', + streamId: 'stream-456', + provider: 'openai', + model: 'gpt-4o', + messageCount: 1, + hasTools: false, + streaming: true, + timestamp: Date.now(), + }) + + aiEventClient.emit('chat:completed', { + requestId: 'req-123', + streamId: 'stream-456', + model: 'gpt-4o', + content: 'Secret content', + timestamp: Date.now(), + }) + + const span = mock_tracer.spans[0]! + expect(span.attributes['gen_ai.response.content']).toBeUndefined() + + no_content_instrumentation.disable() + }) + }) + + describe('enableOpenTelemetry helper', () => { + beforeEach(() => { + // Disable the default instrumentation from the outer beforeEach + if (instrumentation) { + instrumentation.disable() + instrumentation = null + } + }) + + it('should create and enable instrumentation', () => { + const local_tracer = createMockTracer() + const inst = enableOpenTelemetry({ tracer: local_tracer }) + + aiEventClient.emit('chat:started', { + requestId: 'req-helper-1', + streamId: 'stream-helper-1', + provider: 'openai', + model: 'gpt-4o', + messageCount: 1, + hasTools: false, + streaming: true, + timestamp: Date.now(), + }) + + expect(local_tracer.spans.length).toBe(1) + inst.disable() + }) + + it('should return an instrumentation instance that can be disabled', () => { + const local_tracer = createMockTracer() + const inst = enableOpenTelemetry({ tracer: local_tracer }) + + // Verify it returns a 
valid instrumentation + expect(inst).toBeInstanceOf(TanStackAIInstrumentation) + + // Disable should not throw + expect(() => inst.disable()).not.toThrow() + }) + }) +}) diff --git a/packages/typescript/ai-opentelemetry/tsconfig.json b/packages/typescript/ai-opentelemetry/tsconfig.json new file mode 100644 index 00000000..e5e87274 --- /dev/null +++ b/packages/typescript/ai-opentelemetry/tsconfig.json @@ -0,0 +1,8 @@ +{ + "extends": "../../../tsconfig.json", + "compilerOptions": { + "outDir": "dist" + }, + "include": ["vite.config.ts", "./src"], + "exclude": ["node_modules", "dist"] +} diff --git a/packages/typescript/ai-opentelemetry/vite.config.ts b/packages/typescript/ai-opentelemetry/vite.config.ts new file mode 100644 index 00000000..11f5b20b --- /dev/null +++ b/packages/typescript/ai-opentelemetry/vite.config.ts @@ -0,0 +1,37 @@ +import { defineConfig, mergeConfig } from 'vitest/config' +import { tanstackViteConfig } from '@tanstack/vite-config' +import packageJson from './package.json' + +const config = defineConfig({ + test: { + name: packageJson.name, + dir: './', + watch: false, + + globals: true, + environment: 'node', + include: ['tests/**/*.test.ts'], + coverage: { + provider: 'v8', + reporter: ['text', 'json', 'html', 'lcov'], + exclude: [ + 'node_modules/', + 'dist/', + 'tests/', + '**/*.test.ts', + '**/*.config.ts', + '**/types.ts', + ], + include: ['src/**/*.ts'], + }, + }, +}) + +export default mergeConfig( + config, + tanstackViteConfig({ + entry: ['./src/index.ts'], + srcDir: './src', + cjs: false, + }), +)