From cb8ea5f48f23dc29f5acc9db0a3e6a2c29857b22 Mon Sep 17 00:00:00 2001 From: Sergiy Dybskiy Date: Thu, 14 May 2026 18:34:00 -0400 Subject: [PATCH 1/2] feat(node): Add AI SDK v7 support via diagnostics_channel MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit AI SDK v7 publishes all telemetry events to node:diagnostics_channel on 'aisdk:telemetry', regardless of which OpenTelemetry integration the user registers. This subscribes to that channel and creates spans directly with gen_ai.* attributes — no OTel span translation needed. - v3-v6: existing OTel instrumentation path (unchanged) - v7+: diagnostic channel subscriber creates spans from raw events - On v3-v6, the DC subscriber is inert (channel never published to) Handles: generateText, streamText, generateObject, streamObject, embed, embedMany, rerank, tool execution, tool errors, ToolLoopAgent. Co-Authored-By: Claude Opus 4.6 (1M context) --- .../vercelai/v7/instrument-with-pii.mjs | 12 + .../suites/tracing/vercelai/v7/instrument.mjs | 11 + .../tracing/vercelai/v7/scenario-disabled.mjs | 26 ++ .../tracing/vercelai/v7/scenario-embed.mjs | 19 + .../vercelai/v7/scenario-error-in-tool.mjs | 41 +++ .../vercelai/v7/scenario-stream-text.mjs | 31 ++ .../vercelai/v7/scenario-tool-loop-agent.mjs | 61 ++++ .../suites/tracing/vercelai/v7/scenario.mjs | 76 ++++ .../suites/tracing/vercelai/v7/test.ts | 332 ++++++++++++++++++ .../tracing/vercelai/dc-handlers.ts | 299 ++++++++++++++++ .../tracing/vercelai/dc-subscriber.ts | 65 ++++ .../integrations/tracing/vercelai/index.ts | 5 + 12 files changed, 978 insertions(+) create mode 100644 dev-packages/node-integration-tests/suites/tracing/vercelai/v7/instrument-with-pii.mjs create mode 100644 dev-packages/node-integration-tests/suites/tracing/vercelai/v7/instrument.mjs create mode 100644 dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario-disabled.mjs create mode 100644 
dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario-embed.mjs create mode 100644 dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario-error-in-tool.mjs create mode 100644 dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario-stream-text.mjs create mode 100644 dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario-tool-loop-agent.mjs create mode 100644 dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario.mjs create mode 100644 dev-packages/node-integration-tests/suites/tracing/vercelai/v7/test.ts create mode 100644 packages/node/src/integrations/tracing/vercelai/dc-handlers.ts create mode 100644 packages/node/src/integrations/tracing/vercelai/dc-subscriber.ts diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/instrument-with-pii.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/instrument-with-pii.mjs new file mode 100644 index 000000000000..ddc247cc2d41 --- /dev/null +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/instrument-with-pii.mjs @@ -0,0 +1,12 @@ +import * as Sentry from '@sentry/node'; +import { loggingTransport } from '@sentry-internal/node-integration-tests'; + +Sentry.init({ + dsn: 'https://public@dsn.ingest.sentry.io/1337', + release: '1.0', + tracesSampleRate: 1.0, + sendDefaultPii: true, + transport: loggingTransport, + integrations: [Sentry.vercelAIIntegration()], + streamGenAiSpans: true, +}); diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/instrument.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/instrument.mjs new file mode 100644 index 000000000000..a76d206a0b61 --- /dev/null +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/instrument.mjs @@ -0,0 +1,11 @@ +import * as Sentry from '@sentry/node'; +import { loggingTransport } from '@sentry-internal/node-integration-tests'; + +Sentry.init({ + dsn: 
'https://public@dsn.ingest.sentry.io/1337', + release: '1.0', + tracesSampleRate: 1.0, + transport: loggingTransport, + integrations: [Sentry.vercelAIIntegration()], + streamGenAiSpans: true, +}); diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario-disabled.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario-disabled.mjs new file mode 100644 index 000000000000..2b68639c6bf2 --- /dev/null +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario-disabled.mjs @@ -0,0 +1,26 @@ +import * as Sentry from '@sentry/node'; +import { generateText } from 'ai'; +import { MockLanguageModelV3 } from 'ai/test'; + +async function run() { + await Sentry.startSpan({ op: 'function', name: 'main' }, async () => { + await generateText({ + telemetry: { isEnabled: false }, + model: new MockLanguageModelV3({ + doGenerate: async () => ({ + finishReason: { unified: 'stop', raw: 'stop' }, + usage: { + inputTokens: { total: 10, noCache: 10, cached: 0 }, + outputTokens: { total: 20, noCache: 20, cached: 0 }, + totalTokens: { total: 30, noCache: 30, cached: 0 }, + }, + content: [{ type: 'text', text: 'Should not be captured' }], + warnings: [], + }), + }), + prompt: 'This should be silent', + }); + }); +} + +run(); diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario-embed.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario-embed.mjs new file mode 100644 index 000000000000..72fa8a041f39 --- /dev/null +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario-embed.mjs @@ -0,0 +1,19 @@ +import * as Sentry from '@sentry/node'; +import { embed } from 'ai'; +import { MockEmbeddingModelV3 } from 'ai/test'; + +async function run() { + await Sentry.startSpan({ op: 'function', name: 'main' }, async () => { + await embed({ + model: new MockEmbeddingModelV3({ + doEmbed: async () => ({ + embeddings: [[0.1, 0.2, 0.3]], + usage: { tokens: 5 }, + }), + 
}), + value: 'Hello world', + }); + }); +} + +run(); diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario-error-in-tool.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario-error-in-tool.mjs new file mode 100644 index 000000000000..9ea18401ac35 --- /dev/null +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario-error-in-tool.mjs @@ -0,0 +1,41 @@ +import * as Sentry from '@sentry/node'; +import { generateText, tool } from 'ai'; +import { MockLanguageModelV3 } from 'ai/test'; +import { z } from 'zod'; + +async function run() { + await Sentry.startSpan({ op: 'function', name: 'main' }, async () => { + await generateText({ + model: new MockLanguageModelV3({ + doGenerate: async () => ({ + finishReason: { unified: 'tool-calls', raw: 'tool_calls' }, + usage: { + inputTokens: { total: 15, noCache: 15, cached: 0 }, + outputTokens: { total: 25, noCache: 25, cached: 0 }, + totalTokens: { total: 40, noCache: 40, cached: 0 }, + }, + content: [ + { + type: 'tool-call', + toolCallId: 'call-1', + toolName: 'getWeather', + input: JSON.stringify({ location: 'San Francisco' }), + }, + ], + warnings: [], + }), + }), + tools: { + getWeather: tool({ + inputSchema: z.object({ location: z.string() }), + execute: async () => { + throw new Error('Error in tool'); + }, + }), + }, + prompt: 'What is the weather in San Francisco?', + }); + }); +} + +run(); diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario-stream-text.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario-stream-text.mjs new file mode 100644 index 000000000000..858d91370678 --- /dev/null +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario-stream-text.mjs @@ -0,0 +1,31 @@ +import * as Sentry from '@sentry/node'; +import { streamText, tool } from 'ai'; +import { MockLanguageModelV3 } from 'ai/test'; +import { z } from 'zod'; + +async function run() { + await 
Sentry.startSpan({ op: 'function', name: 'main' }, async () => { + const { textStream } = streamText({ + model: new MockLanguageModelV3({ + doGenerate: async () => ({ + finishReason: { unified: 'stop', raw: 'stop' }, + usage: { + inputTokens: { total: 10, noCache: 10, cached: 0 }, + outputTokens: { total: 20, noCache: 20, cached: 0 }, + totalTokens: { total: 30, noCache: 30, cached: 0 }, + }, + content: [{ type: 'text', text: 'Streamed response!' }], + warnings: [], + }), + }), + prompt: 'Stream me a response', + }); + + const chunks = []; + for await (const chunk of textStream) { + chunks.push(chunk); + } + }); +} + +run(); diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario-tool-loop-agent.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario-tool-loop-agent.mjs new file mode 100644 index 000000000000..ede74912ffc5 --- /dev/null +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario-tool-loop-agent.mjs @@ -0,0 +1,61 @@ +import * as Sentry from '@sentry/node'; +import { ToolLoopAgent, stepCountIs, tool } from 'ai'; +import { MockLanguageModelV3 } from 'ai/test'; +import { z } from 'zod'; + +async function run() { + await Sentry.startSpan({ op: 'function', name: 'main' }, async () => { + let callCount = 0; + + const agent = new ToolLoopAgent({ + telemetry: { functionId: 'weather_agent' }, + model: new MockLanguageModelV3({ + doGenerate: async () => { + if (callCount++ === 0) { + return { + finishReason: { unified: 'tool-calls', raw: 'tool_calls' }, + usage: { + inputTokens: { total: 10, noCache: 10, cached: 0 }, + outputTokens: { total: 20, noCache: 20, cached: 0 }, + totalTokens: { total: 30, noCache: 30, cached: 0 }, + }, + content: [ + { + type: 'tool-call', + toolCallId: 'call-1', + toolName: 'getWeather', + input: JSON.stringify({ location: 'San Francisco' }), + }, + ], + warnings: [], + }; + } + return { + finishReason: { unified: 'stop', raw: 'stop' }, + usage: { + inputTokens: { 
total: 15, noCache: 15, cached: 0 }, + outputTokens: { total: 25, noCache: 25, cached: 0 }, + totalTokens: { total: 40, noCache: 40, cached: 0 }, + }, + content: [{ type: 'text', text: 'The weather in San Francisco is sunny, 72°F.' }], + warnings: [], + }; + }, + }), + tools: { + getWeather: tool({ + description: 'Get the current weather for a location', + inputSchema: z.object({ location: z.string() }), + execute: async ({ location }) => `Weather in ${location}: Sunny, 72°F`, + }), + }, + stopWhen: stepCountIs(3), + }); + + await agent.generate({ + prompt: 'What is the weather in San Francisco?', + }); + }); +} + +run(); diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario.mjs new file mode 100644 index 000000000000..cf531d3fb064 --- /dev/null +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario.mjs @@ -0,0 +1,76 @@ +import * as Sentry from '@sentry/node'; +import { generateText, tool } from 'ai'; +import { MockLanguageModelV3 } from 'ai/test'; +import { z } from 'zod'; + +async function run() { + await Sentry.startSpan({ op: 'function', name: 'main' }, async () => { + // Basic generateText — no telemetry config needed, DC subscriber auto-captures + await generateText({ + model: new MockLanguageModelV3({ + doGenerate: async () => ({ + finishReason: { unified: 'stop', raw: 'stop' }, + usage: { + inputTokens: { total: 10, noCache: 10, cached: 0 }, + outputTokens: { total: 20, noCache: 20, cached: 0 }, + totalTokens: { total: 30, noCache: 30, cached: 0 }, + }, + content: [{ type: 'text', text: 'First response!' 
}], + warnings: [], + }), + }), + prompt: 'Where is the first span?', + }); + + // generateText with tool calls + await generateText({ + model: new MockLanguageModelV3({ + doGenerate: async () => ({ + finishReason: { unified: 'tool-calls', raw: 'tool_calls' }, + usage: { + inputTokens: { total: 15, noCache: 15, cached: 0 }, + outputTokens: { total: 25, noCache: 25, cached: 0 }, + totalTokens: { total: 40, noCache: 40, cached: 0 }, + }, + content: [ + { + type: 'tool-call', + toolCallId: 'call-1', + toolName: 'getWeather', + input: JSON.stringify({ location: 'San Francisco' }), + }, + ], + warnings: [], + }), + }), + tools: { + getWeather: tool({ + description: 'Get the current weather for a location', + inputSchema: z.object({ location: z.string() }), + execute: async ({ location }) => `Weather in ${location}: Sunny, 72°F`, + }), + }, + prompt: 'What is the weather in San Francisco?', + }); + + // generateText with telemetry explicitly disabled — should NOT produce spans + await generateText({ + telemetry: { isEnabled: false }, + model: new MockLanguageModelV3({ + doGenerate: async () => ({ + finishReason: { unified: 'stop', raw: 'stop' }, + usage: { + inputTokens: { total: 10, noCache: 10, cached: 0 }, + outputTokens: { total: 20, noCache: 20, cached: 0 }, + totalTokens: { total: 30, noCache: 30, cached: 0 }, + }, + content: [{ type: 'text', text: 'Should not be captured!' 
}], + warnings: [], + }), + }), + prompt: 'This should be silent', + }); + }); +} + +run(); diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/test.ts new file mode 100644 index 000000000000..73dbf835fdef --- /dev/null +++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/test.ts @@ -0,0 +1,332 @@ +import type { Event } from '@sentry/node'; +import { afterAll, describe, expect } from 'vitest'; +import { + GEN_AI_INPUT_MESSAGES_ATTRIBUTE, + GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE, + GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE, + GEN_AI_REQUEST_MODEL_ATTRIBUTE, + GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE, + GEN_AI_TOOL_CALL_ID_ATTRIBUTE, + GEN_AI_TOOL_NAME_ATTRIBUTE, + GEN_AI_TOOL_TYPE_ATTRIBUTE, + GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE, +} from '../../../../../../packages/core/src/tracing/ai/gen-ai-attributes'; +import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../../utils/runner'; + +describe('Vercel AI integration (V7 - diagnostic channel)', () => { + afterAll(() => { + cleanupChildProcesses(); + }); + + createEsmAndCjsTests( + __dirname, + 'scenario.mjs', + 'instrument.mjs', + (createRunner, test) => { + test('creates ai spans via diagnostic channel without sendDefaultPii', async () => { + await createRunner() + .expect({ transaction: { transaction: 'main' } }) + .expect({ + span: container => { + // 2 generateText calls (3rd is disabled) × (invoke_agent + generate_content) + 1 tool = 5 + expect(container.items).toHaveLength(5); + + const firstInvokeAgentSpan = container.items.find( + span => + span.name === 'invoke_agent mock-model-id' && + span.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]?.value === 10, + ); + expect(firstInvokeAgentSpan).toBeDefined(); + expect(firstInvokeAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent'); + 
expect(firstInvokeAgentSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('mock-model-id'); + expect(firstInvokeAgentSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(10); + expect(firstInvokeAgentSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(20); + expect(firstInvokeAgentSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE].value).toBe(30); + // No PII — messages should not be recorded + expect(firstInvokeAgentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toBeUndefined(); + + const firstGenerateContentSpan = container.items.find( + span => + span.name === 'generate_content mock-model-id' && + span.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]?.value === 10, + ); + expect(firstGenerateContentSpan).toBeDefined(); + expect(firstGenerateContentSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content'); + expect(firstGenerateContentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toBeUndefined(); + + const toolExecutionSpan = container.items.find(span => span.name === 'execute_tool getWeather'); + expect(toolExecutionSpan).toBeDefined(); + expect(toolExecutionSpan!.attributes['sentry.op'].value).toBe('gen_ai.execute_tool'); + expect(toolExecutionSpan!.attributes[GEN_AI_TOOL_NAME_ATTRIBUTE].value).toBe('getWeather'); + expect(toolExecutionSpan!.attributes[GEN_AI_TOOL_CALL_ID_ATTRIBUTE].value).toBe('call-1'); + expect(toolExecutionSpan!.attributes[GEN_AI_TOOL_TYPE_ATTRIBUTE].value).toBe('function'); + }, + }) + .start() + .completed(); + }); + }, + { + additionalDependencies: { + ai: '^7.0.0-canary', + }, + }, + ); + + createEsmAndCjsTests( + __dirname, + 'scenario.mjs', + 'instrument-with-pii.mjs', + (createRunner, test) => { + test('creates ai spans with sendDefaultPii: true', async () => { + await createRunner() + .expect({ transaction: { transaction: 'main' } }) + .expect({ + span: container => { + expect(container.items).toHaveLength(5); + + const firstInvokeAgentSpan = container.items.find( + span => 
+ span.attributes['sentry.op']?.value === 'gen_ai.invoke_agent' && + span.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]?.value === 10, + ); + expect(firstInvokeAgentSpan).toBeDefined(); + expect(firstInvokeAgentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toBeDefined(); + expect(firstInvokeAgentSpan!.attributes[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]).toBeDefined(); + + const toolGenerateContentSpan = container.items.find( + span => + span.attributes['sentry.op']?.value === 'gen_ai.generate_content' && + span.attributes[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE] !== undefined, + ); + expect(toolGenerateContentSpan).toBeDefined(); + expect(toolGenerateContentSpan!.attributes[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]).toBeDefined(); + expect(toolGenerateContentSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(15); + + const toolExecutionSpan = container.items.find(span => span.name === 'execute_tool getWeather'); + expect(toolExecutionSpan).toBeDefined(); + expect(toolExecutionSpan!.attributes[GEN_AI_TOOL_NAME_ATTRIBUTE].value).toBe('getWeather'); + }, + }) + .start() + .completed(); + }); + }, + { + additionalDependencies: { + ai: '^7.0.0-canary', + }, + }, + ); + + createEsmAndCjsTests( + __dirname, + 'scenario-error-in-tool.mjs', + 'instrument.mjs', + (createRunner, test) => { + test('captures error in tool', async () => { + let transactionEvent: Event | undefined; + let errorEvent: Event | undefined; + + await createRunner() + .expect({ + transaction: transaction => { + transactionEvent = transaction; + }, + }) + .expect({ + span: container => { + expect(container.items).toHaveLength(3); + + const invokeAgentSpan = container.items.find( + span => span.attributes['sentry.op']?.value === 'gen_ai.invoke_agent', + ); + expect(invokeAgentSpan).toBeDefined(); + + const generateContentSpan = container.items.find( + span => span.attributes['sentry.op']?.value === 'gen_ai.generate_content', + ); + expect(generateContentSpan).toBeDefined(); + + const 
toolSpan = container.items.find(span => span.name === 'execute_tool getWeather'); + expect(toolSpan).toBeDefined(); + expect(toolSpan!.attributes['sentry.op'].value).toBe('gen_ai.execute_tool'); + expect(toolSpan!.attributes[GEN_AI_TOOL_NAME_ATTRIBUTE].value).toBe('getWeather'); + }, + }) + .expect({ + event: event => { + errorEvent = event; + }, + }) + .start() + .completed(); + + expect(transactionEvent).toBeDefined(); + expect(transactionEvent!.transaction).toBe('main'); + + expect(errorEvent).toBeDefined(); + expect(errorEvent!.level).toBe('error'); + expect(errorEvent!.tags).toEqual( + expect.objectContaining({ + 'vercel.ai.tool.name': 'getWeather', + 'vercel.ai.tool.callId': 'call-1', + }), + ); + + expect(transactionEvent!.contexts!.trace!.trace_id).toBe(errorEvent!.contexts!.trace!.trace_id); + }); + }, + { + additionalDependencies: { + ai: '^7.0.0-canary', + }, + }, + ); + + createEsmAndCjsTests( + __dirname, + 'scenario-tool-loop-agent.mjs', + 'instrument.mjs', + (createRunner, test) => { + test('creates spans for ToolLoopAgent with tool calls', async () => { + await createRunner() + .expect({ transaction: { transaction: 'main' } }) + .expect({ + span: container => { + expect(container.items).toHaveLength(4); + + const invokeAgentSpan = container.items.find(span => span.name === 'invoke_agent weather_agent'); + expect(invokeAgentSpan).toBeDefined(); + expect(invokeAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent'); + expect(invokeAgentSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('mock-model-id'); + + const toolCallsGenerateContentSpan = container.items.find( + span => span.attributes[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]?.value === '["tool_call"]', + ); + expect(toolCallsGenerateContentSpan).toBeDefined(); + expect(toolCallsGenerateContentSpan!.name).toBe('generate_content mock-model-id'); + expect(toolCallsGenerateContentSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content'); + 
expect(toolCallsGenerateContentSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(10); + expect(toolCallsGenerateContentSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(20); + + const toolSpan = container.items.find(span => span.name === 'execute_tool getWeather'); + expect(toolSpan).toBeDefined(); + expect(toolSpan!.attributes['sentry.op'].value).toBe('gen_ai.execute_tool'); + expect(toolSpan!.attributes[GEN_AI_TOOL_NAME_ATTRIBUTE].value).toBe('getWeather'); + expect(toolSpan!.attributes[GEN_AI_TOOL_CALL_ID_ATTRIBUTE].value).toBe('call-1'); + expect(toolSpan!.attributes[GEN_AI_TOOL_TYPE_ATTRIBUTE].value).toBe('function'); + + const finalGenerateContentSpan = container.items.find( + span => span.attributes[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]?.value === '["stop"]', + ); + expect(finalGenerateContentSpan).toBeDefined(); + expect(finalGenerateContentSpan!.name).toBe('generate_content mock-model-id'); + expect(finalGenerateContentSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content'); + expect(finalGenerateContentSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(15); + expect(finalGenerateContentSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(25); + }, + }) + .start() + .completed(); + }); + }, + { + additionalDependencies: { + ai: '^7.0.0-canary', + }, + }, + ); + + createEsmAndCjsTests( + __dirname, + 'scenario-stream-text.mjs', + 'instrument.mjs', + (createRunner, test) => { + test('creates spans for streamText', async () => { + await createRunner() + .expect({ transaction: { transaction: 'main' } }) + .expect({ + span: container => { + expect(container.items).toHaveLength(2); + + const invokeAgentSpan = container.items.find( + span => span.attributes['sentry.op']?.value === 'gen_ai.invoke_agent', + ); + expect(invokeAgentSpan).toBeDefined(); + expect(invokeAgentSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('mock-model-id'); + + const generateContentSpan = 
container.items.find( + span => span.attributes['sentry.op']?.value === 'gen_ai.generate_content', + ); + expect(generateContentSpan).toBeDefined(); + expect(generateContentSpan!.name).toBe('generate_content mock-model-id'); + }, + }) + .start() + .completed(); + }); + }, + { + additionalDependencies: { + ai: '^7.0.0-canary', + }, + }, + ); + + createEsmAndCjsTests( + __dirname, + 'scenario-embed.mjs', + 'instrument.mjs', + (createRunner, test) => { + test('creates spans for embed', async () => { + await createRunner() + .expect({ transaction: { transaction: 'main' } }) + .expect({ + span: container => { + expect(container.items).toHaveLength(1); + + const embedSpan = container.items[0]; + expect(embedSpan).toBeDefined(); + expect(embedSpan!.attributes['sentry.op'].value).toBe('gen_ai.embeddings'); + expect(embedSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('mock-model-id'); + expect(embedSpan!.name).toBe('embeddings mock-model-id'); + }, + }) + .start() + .completed(); + }); + }, + { + additionalDependencies: { + ai: '^7.0.0-canary', + }, + }, + ); + + createEsmAndCjsTests( + __dirname, + 'scenario-disabled.mjs', + 'instrument.mjs', + (createRunner, test) => { + test('does not create spans when telemetry is disabled', async () => { + await createRunner() + .expect({ + transaction: transaction => { + expect(transaction.transaction).toBe('main'); + expect(transaction.spans).toHaveLength(0); + }, + }) + .start() + .completed(); + }); + }, + { + additionalDependencies: { + ai: '^7.0.0-canary', + }, + }, + ); +}); diff --git a/packages/node/src/integrations/tracing/vercelai/dc-handlers.ts b/packages/node/src/integrations/tracing/vercelai/dc-handlers.ts new file mode 100644 index 000000000000..922f4466ccd3 --- /dev/null +++ b/packages/node/src/integrations/tracing/vercelai/dc-handlers.ts @@ -0,0 +1,299 @@ +import { + captureException, + getClient, + SEMANTIC_ATTRIBUTE_SENTRY_OP, + SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN, + startInactiveSpan, + withScope, +} from 
'@sentry/core'; +import type { Span } from '@sentry/core'; +import type { VercelAiIntegration } from './types'; +import { INTEGRATION_NAME } from './constants'; +import { determineRecordingSettings } from './instrumentation'; + +const ORIGIN = 'auto.vercelai.dc'; + +interface CallState { + rootSpan: Span; + inferenceSpan?: Span; + toolSpans: Map<string, Span>; + recordInputs: boolean; + recordOutputs: boolean; +} + +const callStates = new Map<string, CallState>(); + +function mapOperationName(operationId: string): string { + switch (operationId) { + case 'ai.generateText': + case 'ai.streamText': + case 'ai.generateObject': + case 'ai.streamObject': + return 'invoke_agent'; + case 'ai.embed': + case 'ai.embedMany': + return 'embeddings'; + case 'ai.rerank': + return 'rerank'; + default: + return operationId; + } +} + +function getRecordingSettings(event: Record<string, unknown>): { recordInputs: boolean; recordOutputs: boolean } { + const client = getClient(); + const integration = client?.getIntegrationByName<VercelAiIntegration>(INTEGRATION_NAME); + const integrationOptions = integration?.options; + const defaultRecordingEnabled = integration ?
Boolean(client?.getOptions().sendDefaultPii) : false; + + return determineRecordingSettings( + integrationOptions, + { + recordInputs: event.recordInputs as boolean | undefined, + recordOutputs: event.recordOutputs as boolean | undefined, + }, + undefined, + defaultRecordingEnabled, + ); +} + +interface Usage { + inputTokens?: number; + outputTokens?: number; + inputTokenDetails?: { cacheReadTokens?: number; cacheWriteTokens?: number }; + outputTokenDetails?: { reasoningTokens?: number }; +} + +interface ContentPart { + type: string; + text?: string; + toolCallId?: string; + toolName?: string; + input?: unknown; +} +interface ToolCall { + toolCallId: string; + toolName: string; + input?: unknown; +} + +function setUsageAttributes(span: Span, u: Usage): void { + if (u.inputTokens != null) span.setAttribute('gen_ai.usage.input_tokens', u.inputTokens); + if (u.outputTokens != null) span.setAttribute('gen_ai.usage.output_tokens', u.outputTokens); + if (u.inputTokens != null || u.outputTokens != null) { + span.setAttribute('gen_ai.usage.total_tokens', (u.inputTokens ?? 0) + (u.outputTokens ?? 0)); + } + if (u.inputTokenDetails?.cacheReadTokens != null) + span.setAttribute('gen_ai.usage.input_tokens.cached', u.inputTokenDetails.cacheReadTokens); + if (u.inputTokenDetails?.cacheWriteTokens != null) + span.setAttribute('gen_ai.usage.input_tokens.cache_write', u.inputTokenDetails.cacheWriteTokens); + if (u.outputTokenDetails?.reasoningTokens != null) + span.setAttribute('gen_ai.usage.output_tokens.reasoning', u.outputTokenDetails.reasoningTokens); +} + +function normalizeFinishReason(reason: unknown): string { + if (typeof reason !== 'string') return 'stop'; + return reason === 'tool-calls' ? 
'tool_call' : reason; +} + +function buildOutputMessages(content: ContentPart[], finishReason: unknown): string | undefined { + const parts: Record<string, unknown>[] = []; + const text = content + .filter(p => p.type === 'text' && p.text) + .map(p => p.text) + .join(''); + if (text) parts.push({ type: 'text', content: text }); + for (const tc of content.filter(p => p.type === 'tool-call')) { + parts.push({ + type: 'tool_call', + id: tc.toolCallId, + name: tc.toolName, + arguments: typeof tc.input === 'string' ? tc.input : JSON.stringify(tc.input ?? {}), + }); + } + if (parts.length === 0) return undefined; + return JSON.stringify([{ role: 'assistant', parts, finish_reason: normalizeFinishReason(finishReason) }]); +} + +function formatInputMessages(messages: unknown[]): string { + return JSON.stringify( + messages.map((m: unknown) => + m && typeof m === 'object' && 'role' in m ? m : { role: 'user', content: String(m) }, + ), + ); +} + +export function handleOnStart(event: Record<string, unknown>): void { + const operationId = event.operationId as string; + const callId = event.callId as string; + const modelId = event.modelId as string; + const functionId = event.functionId as string | undefined; + const operationName = mapOperationName(operationId); + const { recordInputs, recordOutputs } = getRecordingSettings(event); + + const spanName = functionId ?
`${operationName} ${functionId}` : `${operationName} ${modelId}`; + const attributes: Record<string, unknown> = { + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: `gen_ai.${operationName}`, + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: ORIGIN, + 'gen_ai.operation.name': operationName, + 'gen_ai.request.model': modelId, + }; + if (functionId) attributes['gen_ai.agent.name'] = functionId; + + if (recordInputs) { + const instructions = event.instructions as string | undefined; + if (instructions) { + attributes['gen_ai.system_instructions'] = JSON.stringify([{ type: 'text', content: instructions }]); + } + const messages = event.messages as unknown[] | undefined; + if (Array.isArray(messages)) attributes['gen_ai.input.messages'] = formatInputMessages(messages); + } + + const rootSpan = startInactiveSpan({ name: spanName, attributes }); + callStates.set(callId, { rootSpan, toolSpans: new Map<string, Span>(), recordInputs, recordOutputs }); +} + +export function handleOnLanguageModelCallStart(event: Record<string, unknown>): void { + const state = callStates.get(event.callId as string); + if (!state) return; + + const modelId = event.modelId as string; + const attributes: Record<string, unknown> = { + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: ORIGIN, + 'gen_ai.operation.name': 'generate_content', + 'gen_ai.request.model': modelId, + 'gen_ai.system': event.provider as string, + }; + if (state.recordInputs) { + const messages = event.messages as unknown[] | undefined; + if (Array.isArray(messages)) attributes['gen_ai.input.messages'] = formatInputMessages(messages); + const tools = event.tools as unknown[] | undefined; + if (Array.isArray(tools)) attributes['gen_ai.request.available_tools'] = JSON.stringify(tools); + } + state.inferenceSpan = startInactiveSpan({ name: `generate_content ${modelId}`, attributes }); +} + +export function handleOnLanguageModelCallEnd(event: Record<string, unknown>): void { + const state = callStates.get(event.callId as string); + if (!state?.inferenceSpan) return; + + const usage =
event.usage as Usage | undefined; + if (usage) setUsageAttributes(state.inferenceSpan, usage); + const finishReason = event.finishReason as string | undefined; + if (finishReason) { + state.inferenceSpan.setAttribute( + 'gen_ai.response.finish_reasons', + JSON.stringify([normalizeFinishReason(finishReason)]), + ); + } + if (event.responseId) state.inferenceSpan.setAttribute('gen_ai.response.id', event.responseId as string); + + if (state.recordOutputs) { + const content = event.content as ContentPart[] | undefined; + if (Array.isArray(content)) { + const out = buildOutputMessages(content, finishReason); + if (out) state.inferenceSpan.setAttribute('gen_ai.output.messages', out); + } + } + state.inferenceSpan.end(); + state.inferenceSpan = undefined; +} + +export function handleOnToolExecutionStart(event: Record<string, unknown>): void { + const state = callStates.get(event.callId as string); + if (!state) return; + const toolCall = event.toolCall as ToolCall; + if (!toolCall) return; + + const attributes: Record<string, unknown> = { + [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool', + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: ORIGIN, + 'gen_ai.operation.name': 'execute_tool', + 'gen_ai.tool.name': toolCall.toolName, + 'gen_ai.tool.call.id': toolCall.toolCallId, + 'gen_ai.tool.type': 'function', + }; + if (state.recordInputs && toolCall.input != null) { + attributes['gen_ai.tool.input'] = + typeof toolCall.input === 'string' ?
toolCall.input : JSON.stringify(toolCall.input); + } + state.toolSpans.set( + toolCall.toolCallId, + startInactiveSpan({ name: `execute_tool ${toolCall.toolName}`, attributes }), + ); +} + +export function handleOnToolExecutionEnd(event: Record): void { + const state = callStates.get(event.callId as string); + if (!state) return; + const toolCall = event.toolCall as ToolCall; + if (!toolCall) return; + const toolSpan = state.toolSpans.get(toolCall.toolCallId); + if (!toolSpan) return; + + const toolOutput = event.toolOutput as { type: string; output?: unknown; error?: Error } | undefined; + if (toolOutput?.type === 'tool-result' && state.recordOutputs && toolOutput.output != null) { + try { + toolSpan.setAttribute('gen_ai.tool.output', JSON.stringify(toolOutput.output)); + } catch { + // ignore + } + } else if (toolOutput?.type === 'tool-error' && toolOutput.error) { + toolSpan.setStatus({ code: 2, message: toolOutput.error.message }); + withScope(scope => { + scope.setTag('vercel.ai.tool.name', toolCall.toolName); + scope.setTag('vercel.ai.tool.callId', toolCall.toolCallId); + scope.setLevel('error'); + captureException(toolOutput.error, { mechanism: { type: 'auto.vercelai.dc', handled: false } }); + }); + } + toolSpan.end(); + state.toolSpans.delete(toolCall.toolCallId); +} + +export function handleOnEnd(event: Record): void { + const state = callStates.get(event.callId as string); + if (!state) return; + + const usage = (event.totalUsage ?? 
event.usage) as Usage | undefined; + if (usage) setUsageAttributes(state.rootSpan, usage); + const finishReason = event.finishReason as string | undefined; + if (finishReason) { + state.rootSpan.setAttribute( + 'gen_ai.response.finish_reasons', + JSON.stringify([normalizeFinishReason(finishReason)]), + ); + } + + if (state.recordOutputs) { + const content: ContentPart[] = []; + const text = event.text as string | undefined; + if (text) content.push({ type: 'text', text }); + const toolCalls = event.toolCalls as ToolCall[] | undefined; + if (toolCalls) { + for (const tc of toolCalls) { + content.push({ type: 'tool-call', toolCallId: tc.toolCallId, toolName: tc.toolName, input: tc.input }); + } + } + const out = buildOutputMessages(content, finishReason); + if (out) state.rootSpan.setAttribute('gen_ai.output.messages', out); + } + state.rootSpan.end(); + callStates.delete(event.callId as string); +} + +export function handleOnError(event: Record): void { + const state = callStates.get(event.callId as string); + if (!state) return; + const error = (event.error ?? 
event) as Error; + const endWithError = (span: Span): void => { + span.setStatus({ code: 2, message: error?.message }); + span.end(); + }; + for (const [, s] of state.toolSpans) endWithError(s); + state.toolSpans.clear(); + if (state.inferenceSpan) endWithError(state.inferenceSpan); + endWithError(state.rootSpan); + callStates.delete(event.callId as string); +} diff --git a/packages/node/src/integrations/tracing/vercelai/dc-subscriber.ts b/packages/node/src/integrations/tracing/vercelai/dc-subscriber.ts new file mode 100644 index 000000000000..72e4d17eaa95 --- /dev/null +++ b/packages/node/src/integrations/tracing/vercelai/dc-subscriber.ts @@ -0,0 +1,65 @@ +import { subscribe } from 'node:diagnostics_channel'; +import { + handleOnEnd, + handleOnError, + handleOnLanguageModelCallEnd, + handleOnLanguageModelCallStart, + handleOnStart, + handleOnToolExecutionEnd, + handleOnToolExecutionStart, +} from './dc-handlers'; + +const DC_CHANNEL = 'aisdk:telemetry'; + +function onDiagnosticMessage(message: unknown): void { + const msg = message as { type: string; event: Record }; + if (!msg?.type || !msg?.event) return; + + try { + switch (msg.type) { + case 'onStart': + handleOnStart(msg.event); + break; + case 'onLanguageModelCallStart': + handleOnLanguageModelCallStart(msg.event); + break; + case 'onLanguageModelCallEnd': + handleOnLanguageModelCallEnd(msg.event); + break; + case 'onToolExecutionStart': + handleOnToolExecutionStart(msg.event); + break; + case 'onToolExecutionEnd': + handleOnToolExecutionEnd(msg.event); + break; + case 'onEnd': + handleOnEnd(msg.event); + break; + case 'onError': + handleOnError(msg.event); + break; + } + } catch { + // Never let telemetry processing break the application + } +} + +let subscribed = false; + +/** + * Subscribe to AI SDK v7+ diagnostic channel for telemetry events. + * + * On v3-v6 the channel is never published to, so this is inert. 
+ * On v7+ the AI SDK publishes all telemetry events to 'aisdk:telemetry' + * regardless of which OpenTelemetry integration the user has registered. + */ +export function subscribeAiSdkDiagnosticChannel(): void { + if (subscribed) return; + subscribed = true; + + try { + subscribe(DC_CHANNEL, onDiagnosticMessage); + } catch { + // subscribe may not be available on all runtimes + } +} diff --git a/packages/node/src/integrations/tracing/vercelai/index.ts b/packages/node/src/integrations/tracing/vercelai/index.ts index a0b3f3126d01..e7625448abd0 100644 --- a/packages/node/src/integrations/tracing/vercelai/index.ts +++ b/packages/node/src/integrations/tracing/vercelai/index.ts @@ -2,6 +2,7 @@ import type { Client, IntegrationFn } from '@sentry/core'; import { addVercelAiProcessors, defineIntegration } from '@sentry/core'; import { generateInstrumentOnce, type modulesIntegration } from '@sentry/node-core'; import { INTEGRATION_NAME } from './constants'; +import { subscribeAiSdkDiagnosticChannel } from './dc-subscriber'; import { SentryVercelAiInstrumentation } from './instrumentation'; import type { VercelAiOptions } from './types'; @@ -35,6 +36,10 @@ const _vercelAIIntegration = ((options: VercelAiOptions = {}) => { } else { instrumentation?.callWhenPatched(() => addVercelAiProcessors(client)); } + + // AI SDK v7+ publishes telemetry events to node:diagnostics_channel. + // On v3-v6 the channel is never published to, so this is inert. 
+ subscribeAiSdkDiagnosticChannel(); }, }; }) satisfies IntegrationFn; From 81295190df75a18097714fbaf0650f6ad1f15851 Mon Sep 17 00:00:00 2001 From: Sergiy Dybskiy Date: Fri, 15 May 2026 14:01:50 -0400 Subject: [PATCH 2/2] fix(node): Address PR review feedback for AI SDK v7 DC telemetry - Merge dc-subscriber.ts into dc-handlers.ts - Move DC event types (Usage, ContentPart, ToolCall) to types.ts with source links - Replace switch with Record map for operation name mapping - Add AiSdkFinishReason and AiSdkOperationId string literal types - Add safeStringify utility to prevent JSON.stringify from breaking span tree - Clean up child spans in handleOnEnd to prevent orphaned spans - Guard error handling in handleOnError with instanceof check Co-Authored-By: Claude Opus 4.6 (1M context) --- .../tracing/vercelai/dc-handlers.ts | 197 ++++++++++-------- .../tracing/vercelai/dc-subscriber.ts | 65 ------ .../integrations/tracing/vercelai/dc-utils.ts | 7 + .../integrations/tracing/vercelai/index.ts | 2 +- .../integrations/tracing/vercelai/types.ts | 36 ++++ 5 files changed, 149 insertions(+), 158 deletions(-) delete mode 100644 packages/node/src/integrations/tracing/vercelai/dc-subscriber.ts create mode 100644 packages/node/src/integrations/tracing/vercelai/dc-utils.ts diff --git a/packages/node/src/integrations/tracing/vercelai/dc-handlers.ts b/packages/node/src/integrations/tracing/vercelai/dc-handlers.ts index 922f4466ccd3..1e48c9775295 100644 --- a/packages/node/src/integrations/tracing/vercelai/dc-handlers.ts +++ b/packages/node/src/integrations/tracing/vercelai/dc-handlers.ts @@ -1,3 +1,4 @@ +import { subscribe } from 'node:diagnostics_channel'; import { captureException, getClient, @@ -7,9 +8,17 @@ import { withScope, } from '@sentry/core'; import type { Span } from '@sentry/core'; -import type { VercelAiIntegration } from './types'; import { INTEGRATION_NAME } from './constants'; +import { safeStringify } from './dc-utils'; import { determineRecordingSettings } from 
'./instrumentation'; +import type { + AiSdkContentPart, + AiSdkFinishReason, + AiSdkOperationId, + AiSdkToolCall, + AiSdkUsage, + VercelAiIntegration, +} from './types'; const ORIGIN = 'auto.vercelai.dc'; @@ -23,66 +32,40 @@ interface CallState { const callStates = new Map(); +const OPERATION_NAME_MAP: Record = { + 'ai.generateText': 'invoke_agent', + 'ai.streamText': 'invoke_agent', + 'ai.generateObject': 'invoke_agent', + 'ai.streamObject': 'invoke_agent', + 'ai.embed': 'embeddings', + 'ai.embedMany': 'embeddings', + 'ai.rerank': 'rerank', +}; + function mapOperationName(operationId: string): string { - switch (operationId) { - case 'ai.generateText': - case 'ai.streamText': - case 'ai.generateObject': - case 'ai.streamObject': - return 'invoke_agent'; - case 'ai.embed': - case 'ai.embedMany': - return 'embeddings'; - case 'ai.rerank': - return 'rerank'; - default: - return operationId; - } + return OPERATION_NAME_MAP[operationId as AiSdkOperationId] ?? operationId; } function getRecordingSettings(event: Record): { recordInputs: boolean; recordOutputs: boolean } { const client = getClient(); const integration = client?.getIntegrationByName(INTEGRATION_NAME); - const integrationOptions = integration?.options; - const defaultRecordingEnabled = integration ? Boolean(client?.getOptions().sendDefaultPii) : false; - + const defaultPii = integration ? 
Boolean(client?.getOptions().sendDefaultPii) : false; return determineRecordingSettings( - integrationOptions, + integration?.options, { recordInputs: event.recordInputs as boolean | undefined, recordOutputs: event.recordOutputs as boolean | undefined, }, undefined, - defaultRecordingEnabled, + defaultPii, ); } -interface Usage { - inputTokens?: number; - outputTokens?: number; - inputTokenDetails?: { cacheReadTokens?: number; cacheWriteTokens?: number }; - outputTokenDetails?: { reasoningTokens?: number }; -} - -interface ContentPart { - type: string; - text?: string; - toolCallId?: string; - toolName?: string; - input?: unknown; -} -interface ToolCall { - toolCallId: string; - toolName: string; - input?: unknown; -} - -function setUsageAttributes(span: Span, u: Usage): void { +function setUsageAttributes(span: Span, u: AiSdkUsage): void { if (u.inputTokens != null) span.setAttribute('gen_ai.usage.input_tokens', u.inputTokens); if (u.outputTokens != null) span.setAttribute('gen_ai.usage.output_tokens', u.outputTokens); - if (u.inputTokens != null || u.outputTokens != null) { + if (u.inputTokens != null || u.outputTokens != null) span.setAttribute('gen_ai.usage.total_tokens', (u.inputTokens ?? 0) + (u.outputTokens ?? 0)); - } if (u.inputTokenDetails?.cacheReadTokens != null) span.setAttribute('gen_ai.usage.input_tokens.cached', u.inputTokenDetails.cacheReadTokens); if (u.inputTokenDetails?.cacheWriteTokens != null) @@ -91,12 +74,11 @@ function setUsageAttributes(span: Span, u: Usage): void { span.setAttribute('gen_ai.usage.output_tokens.reasoning', u.outputTokenDetails.reasoningTokens); } -function normalizeFinishReason(reason: unknown): string { - if (typeof reason !== 'string') return 'stop'; +function normalizeFinishReason(reason: AiSdkFinishReason): string { return reason === 'tool-calls' ? 
'tool_call' : reason; } -function buildOutputMessages(content: ContentPart[], finishReason: unknown): string | undefined { +function buildOutputMessages(content: AiSdkContentPart[], finishReason: AiSdkFinishReason): string | undefined { const parts: Record[] = []; const text = content .filter(p => p.type === 'text' && p.text) @@ -104,19 +86,15 @@ function buildOutputMessages(content: ContentPart[], finishReason: unknown): str .join(''); if (text) parts.push({ type: 'text', content: text }); for (const tc of content.filter(p => p.type === 'tool-call')) { - parts.push({ - type: 'tool_call', - id: tc.toolCallId, - name: tc.toolName, - arguments: typeof tc.input === 'string' ? tc.input : JSON.stringify(tc.input ?? {}), - }); + const args = typeof tc.input === 'string' ? tc.input : safeStringify(tc.input ?? {}); + parts.push({ type: 'tool_call', id: tc.toolCallId, name: tc.toolName, arguments: args }); } if (parts.length === 0) return undefined; - return JSON.stringify([{ role: 'assistant', parts, finish_reason: normalizeFinishReason(finishReason) }]); + return safeStringify([{ role: 'assistant', parts, finish_reason: normalizeFinishReason(finishReason) }]); } -function formatInputMessages(messages: unknown[]): string { - return JSON.stringify( +function formatInputMessages(messages: unknown[]): string | undefined { + return safeStringify( messages.map((m: unknown) => m && typeof m === 'object' && 'role' in m ? 
m : { role: 'user', content: String(m) }, ), @@ -143,10 +121,14 @@ export function handleOnStart(event: Record): void { if (recordInputs) { const instructions = event.instructions as string | undefined; if (instructions) { - attributes['gen_ai.system_instructions'] = JSON.stringify([{ type: 'text', content: instructions }]); + const val = safeStringify([{ type: 'text', content: instructions }]); + if (val) attributes['gen_ai.system_instructions'] = val; } const messages = event.messages as unknown[] | undefined; - if (Array.isArray(messages)) attributes['gen_ai.input.messages'] = formatInputMessages(messages); + if (Array.isArray(messages)) { + const val = formatInputMessages(messages); + if (val) attributes['gen_ai.input.messages'] = val; + } } const rootSpan = startInactiveSpan({ name: spanName, attributes }); @@ -167,9 +149,15 @@ export function handleOnLanguageModelCallStart(event: Record): }; if (state.recordInputs) { const messages = event.messages as unknown[] | undefined; - if (Array.isArray(messages)) attributes['gen_ai.input.messages'] = formatInputMessages(messages); + if (Array.isArray(messages)) { + const val = formatInputMessages(messages); + if (val) attributes['gen_ai.input.messages'] = val; + } const tools = event.tools as unknown[] | undefined; - if (Array.isArray(tools)) attributes['gen_ai.request.available_tools'] = JSON.stringify(tools); + if (Array.isArray(tools)) { + const val = safeStringify(tools); + if (val) attributes['gen_ai.request.available_tools'] = val; + } } state.inferenceSpan = startInactiveSpan({ name: `generate_content ${modelId}`, attributes }); } @@ -178,21 +166,19 @@ export function handleOnLanguageModelCallEnd(event: Record): vo const state = callStates.get(event.callId as string); if (!state?.inferenceSpan) return; - const usage = event.usage as Usage | undefined; + const usage = event.usage as AiSdkUsage | undefined; if (usage) setUsageAttributes(state.inferenceSpan, usage); - const finishReason = event.finishReason as 
string | undefined; + const finishReason = event.finishReason as AiSdkFinishReason | undefined; if (finishReason) { - state.inferenceSpan.setAttribute( - 'gen_ai.response.finish_reasons', - JSON.stringify([normalizeFinishReason(finishReason)]), - ); + const val = safeStringify([normalizeFinishReason(finishReason)]); + if (val) state.inferenceSpan.setAttribute('gen_ai.response.finish_reasons', val); } if (event.responseId) state.inferenceSpan.setAttribute('gen_ai.response.id', event.responseId as string); if (state.recordOutputs) { - const content = event.content as ContentPart[] | undefined; + const content = event.content as AiSdkContentPart[] | undefined; if (Array.isArray(content)) { - const out = buildOutputMessages(content, finishReason); + const out = buildOutputMessages(content, finishReason ?? 'stop'); if (out) state.inferenceSpan.setAttribute('gen_ai.output.messages', out); } } @@ -203,7 +189,7 @@ export function handleOnLanguageModelCallEnd(event: Record): vo export function handleOnToolExecutionStart(event: Record): void { const state = callStates.get(event.callId as string); if (!state) return; - const toolCall = event.toolCall as ToolCall; + const toolCall = event.toolCall as AiSdkToolCall; if (!toolCall) return; const attributes: Record = { @@ -215,8 +201,8 @@ export function handleOnToolExecutionStart(event: Record): void 'gen_ai.tool.type': 'function', }; if (state.recordInputs && toolCall.input != null) { - attributes['gen_ai.tool.input'] = - typeof toolCall.input === 'string' ? toolCall.input : JSON.stringify(toolCall.input); + const val = typeof toolCall.input === 'string' ? 
toolCall.input : safeStringify(toolCall.input); + if (val) attributes['gen_ai.tool.input'] = val; } state.toolSpans.set( toolCall.toolCallId, @@ -227,18 +213,15 @@ export function handleOnToolExecutionStart(event: Record): void export function handleOnToolExecutionEnd(event: Record): void { const state = callStates.get(event.callId as string); if (!state) return; - const toolCall = event.toolCall as ToolCall; + const toolCall = event.toolCall as AiSdkToolCall; if (!toolCall) return; const toolSpan = state.toolSpans.get(toolCall.toolCallId); if (!toolSpan) return; const toolOutput = event.toolOutput as { type: string; output?: unknown; error?: Error } | undefined; if (toolOutput?.type === 'tool-result' && state.recordOutputs && toolOutput.output != null) { - try { - toolSpan.setAttribute('gen_ai.tool.output', JSON.stringify(toolOutput.output)); - } catch { - // ignore - } + const val = safeStringify(toolOutput.output); + if (val) toolSpan.setAttribute('gen_ai.tool.output', val); } else if (toolOutput?.type === 'tool-error' && toolOutput.error) { toolSpan.setStatus({ code: 2, message: toolOutput.error.message }); withScope(scope => { @@ -256,29 +239,26 @@ export function handleOnEnd(event: Record): void { const state = callStates.get(event.callId as string); if (!state) return; - const usage = (event.totalUsage ?? event.usage) as Usage | undefined; + const usage = (event.totalUsage ?? 
event.usage) as AiSdkUsage | undefined; if (usage) setUsageAttributes(state.rootSpan, usage); - const finishReason = event.finishReason as string | undefined; + const finishReason = event.finishReason as AiSdkFinishReason | undefined; if (finishReason) { - state.rootSpan.setAttribute( - 'gen_ai.response.finish_reasons', - JSON.stringify([normalizeFinishReason(finishReason)]), - ); + const val = safeStringify([normalizeFinishReason(finishReason)]); + if (val) state.rootSpan.setAttribute('gen_ai.response.finish_reasons', val); } - if (state.recordOutputs) { - const content: ContentPart[] = []; + const content: AiSdkContentPart[] = []; const text = event.text as string | undefined; if (text) content.push({ type: 'text', text }); - const toolCalls = event.toolCalls as ToolCall[] | undefined; - if (toolCalls) { - for (const tc of toolCalls) { - content.push({ type: 'tool-call', toolCallId: tc.toolCallId, toolName: tc.toolName, input: tc.input }); - } + for (const tc of (event.toolCalls as AiSdkToolCall[] | undefined) ?? []) { + content.push({ type: 'tool-call', toolCallId: tc.toolCallId, toolName: tc.toolName, input: tc.input }); } - const out = buildOutputMessages(content, finishReason); + const out = buildOutputMessages(content, finishReason ?? 'stop'); if (out) state.rootSpan.setAttribute('gen_ai.output.messages', out); } + for (const [, s] of state.toolSpans) s.end(); + state.toolSpans.clear(); + if (state.inferenceSpan) state.inferenceSpan.end(); state.rootSpan.end(); callStates.delete(event.callId as string); } @@ -286,9 +266,9 @@ export function handleOnEnd(event: Record): void { export function handleOnError(event: Record): void { const state = callStates.get(event.callId as string); if (!state) return; - const error = (event.error ?? event) as Error; + const error = event.error instanceof Error ? 
event.error : undefined; const endWithError = (span: Span): void => { - span.setStatus({ code: 2, message: error?.message }); + span.setStatus({ code: 2, message: error?.message ?? 'unknown error' }); span.end(); }; for (const [, s] of state.toolSpans) endWithError(s); @@ -297,3 +277,36 @@ export function handleOnError(event: Record): void { endWithError(state.rootSpan); callStates.delete(event.callId as string); } + +const DC_CHANNEL = 'aisdk:telemetry'; + +const DC_HANDLERS: Record) => void> = { + onStart: handleOnStart, + onLanguageModelCallStart: handleOnLanguageModelCallStart, + onLanguageModelCallEnd: handleOnLanguageModelCallEnd, + onToolExecutionStart: handleOnToolExecutionStart, + onToolExecutionEnd: handleOnToolExecutionEnd, + onEnd: handleOnEnd, + onError: handleOnError, +}; + +let subscribed = false; + +/** Subscribe to AI SDK v7+ diagnostic channel. Inert on v3-v6. */ +export function subscribeAiSdkDiagnosticChannel(): void { + if (subscribed) return; + subscribed = true; + + try { + subscribe(DC_CHANNEL, (message: unknown) => { + const msg = message as { type: string; event: Record }; + try { + DC_HANDLERS[msg?.type]?.(msg.event); + } catch { + // Never let telemetry processing break the application + } + }); + } catch { + // subscribe may not be available on all runtimes + } +} diff --git a/packages/node/src/integrations/tracing/vercelai/dc-subscriber.ts b/packages/node/src/integrations/tracing/vercelai/dc-subscriber.ts deleted file mode 100644 index 72e4d17eaa95..000000000000 --- a/packages/node/src/integrations/tracing/vercelai/dc-subscriber.ts +++ /dev/null @@ -1,65 +0,0 @@ -import { subscribe } from 'node:diagnostics_channel'; -import { - handleOnEnd, - handleOnError, - handleOnLanguageModelCallEnd, - handleOnLanguageModelCallStart, - handleOnStart, - handleOnToolExecutionEnd, - handleOnToolExecutionStart, -} from './dc-handlers'; - -const DC_CHANNEL = 'aisdk:telemetry'; - -function onDiagnosticMessage(message: unknown): void { - const msg = 
message as { type: string; event: Record }; - if (!msg?.type || !msg?.event) return; - - try { - switch (msg.type) { - case 'onStart': - handleOnStart(msg.event); - break; - case 'onLanguageModelCallStart': - handleOnLanguageModelCallStart(msg.event); - break; - case 'onLanguageModelCallEnd': - handleOnLanguageModelCallEnd(msg.event); - break; - case 'onToolExecutionStart': - handleOnToolExecutionStart(msg.event); - break; - case 'onToolExecutionEnd': - handleOnToolExecutionEnd(msg.event); - break; - case 'onEnd': - handleOnEnd(msg.event); - break; - case 'onError': - handleOnError(msg.event); - break; - } - } catch { - // Never let telemetry processing break the application - } -} - -let subscribed = false; - -/** - * Subscribe to AI SDK v7+ diagnostic channel for telemetry events. - * - * On v3-v6 the channel is never published to, so this is inert. - * On v7+ the AI SDK publishes all telemetry events to 'aisdk:telemetry' - * regardless of which OpenTelemetry integration the user has registered. 
- */ -export function subscribeAiSdkDiagnosticChannel(): void { - if (subscribed) return; - subscribed = true; - - try { - subscribe(DC_CHANNEL, onDiagnosticMessage); - } catch { - // subscribe may not be available on all runtimes - } -} diff --git a/packages/node/src/integrations/tracing/vercelai/dc-utils.ts b/packages/node/src/integrations/tracing/vercelai/dc-utils.ts new file mode 100644 index 000000000000..ee380886c52d --- /dev/null +++ b/packages/node/src/integrations/tracing/vercelai/dc-utils.ts @@ -0,0 +1,7 @@ +export function safeStringify(value: unknown): string | undefined { + try { + return JSON.stringify(value); + } catch { + return undefined; + } +} diff --git a/packages/node/src/integrations/tracing/vercelai/index.ts b/packages/node/src/integrations/tracing/vercelai/index.ts index e7625448abd0..b7b97398dea5 100644 --- a/packages/node/src/integrations/tracing/vercelai/index.ts +++ b/packages/node/src/integrations/tracing/vercelai/index.ts @@ -2,7 +2,7 @@ import type { Client, IntegrationFn } from '@sentry/core'; import { addVercelAiProcessors, defineIntegration } from '@sentry/core'; import { generateInstrumentOnce, type modulesIntegration } from '@sentry/node-core'; import { INTEGRATION_NAME } from './constants'; -import { subscribeAiSdkDiagnosticChannel } from './dc-subscriber'; +import { subscribeAiSdkDiagnosticChannel } from './dc-handlers'; import { SentryVercelAiInstrumentation } from './instrumentation'; import type { VercelAiOptions } from './types'; diff --git a/packages/node/src/integrations/tracing/vercelai/types.ts b/packages/node/src/integrations/tracing/vercelai/types.ts index 624212f9f7bd..a5addad6a79b 100644 --- a/packages/node/src/integrations/tracing/vercelai/types.ts +++ b/packages/node/src/integrations/tracing/vercelai/types.ts @@ -73,3 +73,39 @@ export interface VercelAiOptions { export interface VercelAiIntegration extends Integration { options: VercelAiOptions; } + +// -- Diagnostic channel event types -- +// Shapes derived from 
the AI SDK telemetry events. +// See: https://github.com/vercel/ai/blob/main/packages/ai/core/telemetry + +export type AiSdkOperationId = + | 'ai.generateText' + | 'ai.streamText' + | 'ai.generateObject' + | 'ai.streamObject' + | 'ai.embed' + | 'ai.embedMany' + | 'ai.rerank'; + +export type AiSdkFinishReason = 'stop' | 'length' | 'content-filter' | 'tool-calls' | 'error' | 'other' | 'unknown'; + +export interface AiSdkUsage { + inputTokens?: number; + outputTokens?: number; + inputTokenDetails?: { cacheReadTokens?: number; cacheWriteTokens?: number }; + outputTokenDetails?: { reasoningTokens?: number }; +} + +export interface AiSdkContentPart { + type: string; + text?: string; + toolCallId?: string; + toolName?: string; + input?: unknown; +} + +export interface AiSdkToolCall { + toolCallId: string; + toolName: string; + input?: unknown; +}