diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/instrument-with-pii.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/instrument-with-pii.mjs
new file mode 100644
index 000000000000..ddc247cc2d41
--- /dev/null
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/instrument-with-pii.mjs
@@ -0,0 +1,12 @@
+import * as Sentry from '@sentry/node';
+import { loggingTransport } from '@sentry-internal/node-integration-tests';
+
+Sentry.init({
+  dsn: 'https://public@dsn.ingest.sentry.io/1337',
+  release: '1.0',
+  tracesSampleRate: 1.0,
+  sendDefaultPii: true,
+  transport: loggingTransport,
+  integrations: [Sentry.vercelAIIntegration()],
+  streamGenAiSpans: true,
+});
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/instrument.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/instrument.mjs
new file mode 100644
index 000000000000..a76d206a0b61
--- /dev/null
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/instrument.mjs
@@ -0,0 +1,11 @@
+import * as Sentry from '@sentry/node';
+import { loggingTransport } from '@sentry-internal/node-integration-tests';
+
+Sentry.init({
+  dsn: 'https://public@dsn.ingest.sentry.io/1337',
+  release: '1.0',
+  tracesSampleRate: 1.0,
+  transport: loggingTransport,
+  integrations: [Sentry.vercelAIIntegration()],
+  streamGenAiSpans: true,
+});
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario-disabled.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario-disabled.mjs
new file mode 100644
index 000000000000..2b68639c6bf2
--- /dev/null
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario-disabled.mjs
@@ -0,0 +1,26 @@
+import * as Sentry from '@sentry/node';
+import { generateText } from 'ai';
+import { MockLanguageModelV3 } from 'ai/test';
+
+async function run() {
+  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
+    await generateText({
+      telemetry: { isEnabled: false },
+      model: new MockLanguageModelV3({
+        doGenerate: async () => ({
+          finishReason: { unified: 'stop', raw: 'stop' },
+          usage: {
+            inputTokens: { total: 10, noCache: 10, cached: 0 },
+            outputTokens: { total: 20, noCache: 20, cached: 0 },
+            totalTokens: { total: 30, noCache: 30, cached: 0 },
+          },
+          content: [{ type: 'text', text: 'Should not be captured' }],
+          warnings: [],
+        }),
+      }),
+      prompt: 'This should be silent',
+    });
+  });
+}
+
+run();
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario-embed.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario-embed.mjs
new file mode 100644
index 000000000000..72fa8a041f39
--- /dev/null
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario-embed.mjs
@@ -0,0 +1,19 @@
+import * as Sentry from '@sentry/node';
+import { embed } from 'ai';
+import { MockEmbeddingModelV3 } from 'ai/test';
+
+async function run() {
+  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
+    await embed({
+      model: new MockEmbeddingModelV3({
+        doEmbed: async () => ({
+          embeddings: [[0.1, 0.2, 0.3]],
+          usage: { tokens: 5 },
+        }),
+      }),
+      value: 'Hello world',
+    });
+  });
+}
+
+run();
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario-error-in-tool.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario-error-in-tool.mjs
new file mode 100644
index 000000000000..9ea18401ac35
--- /dev/null
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario-error-in-tool.mjs
@@ -0,0 +1,41 @@
+import * as Sentry from '@sentry/node';
+import { generateText, tool } from 'ai';
+import { MockLanguageModelV3 } from 'ai/test';
+import { z } from 'zod';
+
+async function run() {
+  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
+    await generateText({
+      model: new MockLanguageModelV3({
+        doGenerate: async () => ({
+          finishReason: { unified: 'tool-calls', raw: 'tool_calls' },
+          usage: {
+            inputTokens: { total: 15, noCache: 15, cached: 0 },
+            outputTokens: { total: 25, noCache: 25, cached: 0 },
+            totalTokens: { total: 40, noCache: 40, cached: 0 },
+          },
+          content: [
+            {
+              type: 'tool-call',
+              toolCallId: 'call-1',
+              toolName: 'getWeather',
+              input: JSON.stringify({ location: 'San Francisco' }),
+            },
+          ],
+          warnings: [],
+        }),
+      }),
+      tools: {
+        getWeather: tool({
+          inputSchema: z.object({ location: z.string() }),
+          execute: async () => {
+            throw new Error('Error in tool');
+          },
+        }),
+      },
+      prompt: 'What is the weather in San Francisco?',
+    });
+  });
+}
+
+run();
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario-stream-text.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario-stream-text.mjs
new file mode 100644
index 000000000000..858d91370678
--- /dev/null
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario-stream-text.mjs
@@ -0,0 +1,31 @@
+import * as Sentry from '@sentry/node';
+import { streamText } from 'ai';
+import { MockLanguageModelV3 } from 'ai/test';
+
+async function run() {
+  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
+    const { textStream } = streamText({
+      model: new MockLanguageModelV3({
+        doGenerate: async () => ({
+          finishReason: { unified: 'stop', raw: 'stop' },
+          usage: {
+            inputTokens: { total: 10, noCache: 10, cached: 0 },
+            outputTokens: { total: 20, noCache: 20, cached: 0 },
+            totalTokens: { total: 30, noCache: 30, cached: 0 },
+          },
+          content: [{ type: 'text', text: 'Streamed response!' }],
+          warnings: [],
+        }),
+      }),
+      prompt: 'Stream me a response',
+    });
+
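+    // Drain the stream so the call completes and its telemetry end event fires.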
+    const chunks = [];
+    for await (const chunk of textStream) {
+      chunks.push(chunk);
+    }
+  });
+}
+
+run();
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario-tool-loop-agent.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario-tool-loop-agent.mjs
new file mode 100644
index 000000000000..ede74912ffc5
--- /dev/null
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario-tool-loop-agent.mjs
@@ -0,0 +1,61 @@
+import * as Sentry from '@sentry/node';
+import { ToolLoopAgent, stepCountIs, tool } from 'ai';
+import { MockLanguageModelV3 } from 'ai/test';
+import { z } from 'zod';
+
+async function run() {
+  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
+    let callCount = 0;
+
+    const agent = new ToolLoopAgent({
+      telemetry: { functionId: 'weather_agent' },
+      model: new MockLanguageModelV3({
+        doGenerate: async () => {
+          if (callCount++ === 0) {
+            return {
+              finishReason: { unified: 'tool-calls', raw: 'tool_calls' },
+              usage: {
+                inputTokens: { total: 10, noCache: 10, cached: 0 },
+                outputTokens: { total: 20, noCache: 20, cached: 0 },
+                totalTokens: { total: 30, noCache: 30, cached: 0 },
+              },
+              content: [
+                {
+                  type: 'tool-call',
+                  toolCallId: 'call-1',
+                  toolName: 'getWeather',
+                  input: JSON.stringify({ location: 'San Francisco' }),
+                },
+              ],
+              warnings: [],
+            };
+          }
+          return {
+            finishReason: { unified: 'stop', raw: 'stop' },
+            usage: {
+              inputTokens: { total: 15, noCache: 15, cached: 0 },
+              outputTokens: { total: 25, noCache: 25, cached: 0 },
+              totalTokens: { total: 40, noCache: 40, cached: 0 },
+            },
+            content: [{ type: 'text', text: 'The weather in San Francisco is sunny, 72°F.' }],
+            warnings: [],
+          };
+        },
+      }),
+      tools: {
+        getWeather: tool({
+          description: 'Get the current weather for a location',
+          inputSchema: z.object({ location: z.string() }),
+          execute: async ({ location }) => `Weather in ${location}: Sunny, 72°F`,
+        }),
+      },
+      stopWhen: stepCountIs(3),
+    });
+
+    await agent.generate({
+      prompt: 'What is the weather in San Francisco?',
+    });
+  });
+}
+
+run();
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario.mjs
new file mode 100644
index 000000000000..cf531d3fb064
--- /dev/null
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/scenario.mjs
@@ -0,0 +1,77 @@
+import * as Sentry from '@sentry/node';
+import { generateText, tool } from 'ai';
+import { MockLanguageModelV3 } from 'ai/test';
+import { z } from 'zod';
+
+async function run() {
+  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
+    // Basic generateText — no telemetry config needed, DC subscriber auto-captures
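+    // (vercelAIIntegration subscribes to the AI SDK's 'aisdk:telemetry' diagnostics channel at setup.)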
+    await generateText({
+      model: new MockLanguageModelV3({
+        doGenerate: async () => ({
+          finishReason: { unified: 'stop', raw: 'stop' },
+          usage: {
+            inputTokens: { total: 10, noCache: 10, cached: 0 },
+            outputTokens: { total: 20, noCache: 20, cached: 0 },
+            totalTokens: { total: 30, noCache: 30, cached: 0 },
+          },
+          content: [{ type: 'text', text: 'First response!' }],
+          warnings: [],
+        }),
+      }),
+      prompt: 'Where is the first span?',
+    });
+
+    // generateText with tool calls
+    await generateText({
+      model: new MockLanguageModelV3({
+        doGenerate: async () => ({
+          finishReason: { unified: 'tool-calls', raw: 'tool_calls' },
+          usage: {
+            inputTokens: { total: 15, noCache: 15, cached: 0 },
+            outputTokens: { total: 25, noCache: 25, cached: 0 },
+            totalTokens: { total: 40, noCache: 40, cached: 0 },
+          },
+          content: [
+            {
+              type: 'tool-call',
+              toolCallId: 'call-1',
+              toolName: 'getWeather',
+              input: JSON.stringify({ location: 'San Francisco' }),
+            },
+          ],
+          warnings: [],
+        }),
+      }),
+      tools: {
+        getWeather: tool({
+          description: 'Get the current weather for a location',
+          inputSchema: z.object({ location: z.string() }),
+          execute: async ({ location }) => `Weather in ${location}: Sunny, 72°F`,
+        }),
+      },
+      prompt: 'What is the weather in San Francisco?',
+    });
+
+    // generateText with telemetry explicitly disabled — should NOT produce spans
+    await generateText({
+      telemetry: { isEnabled: false },
+      model: new MockLanguageModelV3({
+        doGenerate: async () => ({
+          finishReason: { unified: 'stop', raw: 'stop' },
+          usage: {
+            inputTokens: { total: 10, noCache: 10, cached: 0 },
+            outputTokens: { total: 20, noCache: 20, cached: 0 },
+            totalTokens: { total: 30, noCache: 30, cached: 0 },
+          },
+          content: [{ type: 'text', text: 'Should not be captured!' }],
+          warnings: [],
+        }),
+      }),
+      prompt: 'This should be silent',
+    });
+  });
+}
+
+run();
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/test.ts
new file mode 100644
index 000000000000..73dbf835fdef
--- /dev/null
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v7/test.ts
@@ -0,0 +1,334 @@
+import type { Event } from '@sentry/node';
+import { afterAll, describe, expect } from 'vitest';
+import {
+  GEN_AI_INPUT_MESSAGES_ATTRIBUTE,
+  GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE,
+  GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE,
+  GEN_AI_REQUEST_MODEL_ATTRIBUTE,
+  GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE,
+  GEN_AI_TOOL_CALL_ID_ATTRIBUTE,
+  GEN_AI_TOOL_NAME_ATTRIBUTE,
+  GEN_AI_TOOL_TYPE_ATTRIBUTE,
+  GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
+  GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
+  GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE,
+} from '../../../../../../packages/core/src/tracing/ai/gen-ai-attributes';
+import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../../utils/runner';
+
+describe('Vercel AI integration (V7 - diagnostic channel)', () => {
+  afterAll(() => {
+    cleanupChildProcesses();
+  });
+
+  createEsmAndCjsTests(
+    __dirname,
+    'scenario.mjs',
+    'instrument.mjs',
+    (createRunner, test) => {
+      test('creates ai spans via diagnostic channel without sendDefaultPii', async () => {
+        await createRunner()
+          .expect({ transaction: { transaction: 'main' } })
+          .expect({
+            span: container => {
+              // 2 generateText calls (3rd is disabled) × (invoke_agent + generate_content) + 1 tool = 5
+              expect(container.items).toHaveLength(5);
+
+              const firstInvokeAgentSpan = container.items.find(
+                span =>
+                  span.name === 'invoke_agent mock-model-id' &&
+                  span.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]?.value === 10,
+              );
+              expect(firstInvokeAgentSpan).toBeDefined();
+              expect(firstInvokeAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+              expect(firstInvokeAgentSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('mock-model-id');
+              expect(firstInvokeAgentSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(10);
+              expect(firstInvokeAgentSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(20);
+              expect(firstInvokeAgentSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE].value).toBe(30);
+              // No PII — messages should not be recorded
+              expect(firstInvokeAgentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toBeUndefined();
+
+              const firstGenerateContentSpan = container.items.find(
+                span =>
+                  span.name === 'generate_content mock-model-id' &&
+                  span.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]?.value === 10,
+              );
+              expect(firstGenerateContentSpan).toBeDefined();
+              expect(firstGenerateContentSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
+              expect(firstGenerateContentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toBeUndefined();
+
+              const toolExecutionSpan = container.items.find(span => span.name === 'execute_tool getWeather');
+              expect(toolExecutionSpan).toBeDefined();
+              expect(toolExecutionSpan!.attributes['sentry.op'].value).toBe('gen_ai.execute_tool');
+              expect(toolExecutionSpan!.attributes[GEN_AI_TOOL_NAME_ATTRIBUTE].value).toBe('getWeather');
+              expect(toolExecutionSpan!.attributes[GEN_AI_TOOL_CALL_ID_ATTRIBUTE].value).toBe('call-1');
+              expect(toolExecutionSpan!.attributes[GEN_AI_TOOL_TYPE_ATTRIBUTE].value).toBe('function');
+            },
+          })
+          .start()
+          .completed();
+      });
+    },
+    {
+      additionalDependencies: {
+        ai: '^7.0.0-canary',
+      },
+    },
+  );
+
+  createEsmAndCjsTests(
+    __dirname,
+    'scenario.mjs',
+    'instrument-with-pii.mjs',
+    (createRunner, test) => {
+      test('creates ai spans with sendDefaultPii: true', async () => {
+        await createRunner()
+          .expect({ transaction: { transaction: 'main' } })
+          .expect({
+            span: container => {
+              expect(container.items).toHaveLength(5);
+
+              const firstInvokeAgentSpan = container.items.find(
+                span =>
+                  span.attributes['sentry.op']?.value === 'gen_ai.invoke_agent' &&
+                  span.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]?.value === 10,
+              );
+              expect(firstInvokeAgentSpan).toBeDefined();
+              expect(firstInvokeAgentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toBeDefined();
+              expect(firstInvokeAgentSpan!.attributes[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]).toBeDefined();
+
+              const toolGenerateContentSpan = container.items.find(
+                span =>
+                  span.attributes['sentry.op']?.value === 'gen_ai.generate_content' &&
+                  span.attributes[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE] !== undefined,
+              );
+              expect(toolGenerateContentSpan).toBeDefined();
+              expect(toolGenerateContentSpan!.attributes[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]).toBeDefined();
+              expect(toolGenerateContentSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(15);
+
+              const toolExecutionSpan = container.items.find(span => span.name === 'execute_tool getWeather');
+              expect(toolExecutionSpan).toBeDefined();
+              expect(toolExecutionSpan!.attributes[GEN_AI_TOOL_NAME_ATTRIBUTE].value).toBe('getWeather');
+            },
+          })
+          .start()
+          .completed();
+      });
+    },
+    {
+      additionalDependencies: {
+        ai: '^7.0.0-canary',
+      },
+    },
+  );
+
+  createEsmAndCjsTests(
+    __dirname,
+    'scenario-error-in-tool.mjs',
+    'instrument.mjs',
+    (createRunner, test) => {
+      test('captures error in tool', async () => {
+        let transactionEvent: Event | undefined;
+        let errorEvent: Event | undefined;
+
+        await createRunner()
+          .expect({
+            transaction: transaction => {
+              transactionEvent = transaction;
+            },
+          })
+          .expect({
+            span: container => {
+              expect(container.items).toHaveLength(3);
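+              // invoke_agent + generate_content + execute_tool; the failing tool still gets its own span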
+
+              const invokeAgentSpan = container.items.find(
+                span => span.attributes['sentry.op']?.value === 'gen_ai.invoke_agent',
+              );
+              expect(invokeAgentSpan).toBeDefined();
+
+              const generateContentSpan = container.items.find(
+                span => span.attributes['sentry.op']?.value === 'gen_ai.generate_content',
+              );
+              expect(generateContentSpan).toBeDefined();
+
+              const toolSpan = container.items.find(span => span.name === 'execute_tool getWeather');
+              expect(toolSpan).toBeDefined();
+              expect(toolSpan!.attributes['sentry.op'].value).toBe('gen_ai.execute_tool');
+              expect(toolSpan!.attributes[GEN_AI_TOOL_NAME_ATTRIBUTE].value).toBe('getWeather');
+            },
+          })
+          .expect({
+            event: event => {
+              errorEvent = event;
+            },
+          })
+          .start()
+          .completed();
+
+        expect(transactionEvent).toBeDefined();
+        expect(transactionEvent!.transaction).toBe('main');
+
+        expect(errorEvent).toBeDefined();
+        expect(errorEvent!.level).toBe('error');
+        expect(errorEvent!.tags).toEqual(
+          expect.objectContaining({
+            'vercel.ai.tool.name': 'getWeather',
+            'vercel.ai.tool.callId': 'call-1',
+          }),
+        );
+
+        expect(transactionEvent!.contexts!.trace!.trace_id).toBe(errorEvent!.contexts!.trace!.trace_id);
+      });
+    },
+    {
+      additionalDependencies: {
+        ai: '^7.0.0-canary',
+      },
+    },
+  );
+
+  createEsmAndCjsTests(
+    __dirname,
+    'scenario-tool-loop-agent.mjs',
+    'instrument.mjs',
+    (createRunner, test) => {
+      test('creates spans for ToolLoopAgent with tool calls', async () => {
+        await createRunner()
+          .expect({ transaction: { transaction: 'main' } })
+          .expect({
+            span: container => {
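+              // invoke_agent + generate_content (tool-call step) + execute_tool + generate_content (final step)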
+              expect(container.items).toHaveLength(4);
+
+              const invokeAgentSpan = container.items.find(span => span.name === 'invoke_agent weather_agent');
+              expect(invokeAgentSpan).toBeDefined();
+              expect(invokeAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+              expect(invokeAgentSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('mock-model-id');
+
+              const toolCallsGenerateContentSpan = container.items.find(
+                span => span.attributes[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]?.value === '["tool_call"]',
+              );
+              expect(toolCallsGenerateContentSpan).toBeDefined();
+              expect(toolCallsGenerateContentSpan!.name).toBe('generate_content mock-model-id');
+              expect(toolCallsGenerateContentSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
+              expect(toolCallsGenerateContentSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(10);
+              expect(toolCallsGenerateContentSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(20);
+
+              const toolSpan = container.items.find(span => span.name === 'execute_tool getWeather');
+              expect(toolSpan).toBeDefined();
+              expect(toolSpan!.attributes['sentry.op'].value).toBe('gen_ai.execute_tool');
+              expect(toolSpan!.attributes[GEN_AI_TOOL_NAME_ATTRIBUTE].value).toBe('getWeather');
+              expect(toolSpan!.attributes[GEN_AI_TOOL_CALL_ID_ATTRIBUTE].value).toBe('call-1');
+              expect(toolSpan!.attributes[GEN_AI_TOOL_TYPE_ATTRIBUTE].value).toBe('function');
+
+              const finalGenerateContentSpan = container.items.find(
+                span => span.attributes[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]?.value === '["stop"]',
+              );
+              expect(finalGenerateContentSpan).toBeDefined();
+              expect(finalGenerateContentSpan!.name).toBe('generate_content mock-model-id');
+              expect(finalGenerateContentSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
+              expect(finalGenerateContentSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(15);
+              expect(finalGenerateContentSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(25);
+            },
+          })
+          .start()
+          .completed();
+      });
+    },
+    {
+      additionalDependencies: {
+        ai: '^7.0.0-canary',
+      },
+    },
+  );
+
+  createEsmAndCjsTests(
+    __dirname,
+    'scenario-stream-text.mjs',
+    'instrument.mjs',
+    (createRunner, test) => {
+      test('creates spans for streamText', async () => {
+        await createRunner()
+          .expect({ transaction: { transaction: 'main' } })
+          .expect({
+            span: container => {
+              expect(container.items).toHaveLength(2);
+
+              const invokeAgentSpan = container.items.find(
+                span => span.attributes['sentry.op']?.value === 'gen_ai.invoke_agent',
+              );
+              expect(invokeAgentSpan).toBeDefined();
+              expect(invokeAgentSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('mock-model-id');
+
+              const generateContentSpan = container.items.find(
+                span => span.attributes['sentry.op']?.value === 'gen_ai.generate_content',
+              );
+              expect(generateContentSpan).toBeDefined();
+              expect(generateContentSpan!.name).toBe('generate_content mock-model-id');
+            },
+          })
+          .start()
+          .completed();
+      });
+    },
+    {
+      additionalDependencies: {
+        ai: '^7.0.0-canary',
+      },
+    },
+  );
+
+  createEsmAndCjsTests(
+    __dirname,
+    'scenario-embed.mjs',
+    'instrument.mjs',
+    (createRunner, test) => {
+      test('creates spans for embed', async () => {
+        await createRunner()
+          .expect({ transaction: { transaction: 'main' } })
+          .expect({
+            span: container => {
+              expect(container.items).toHaveLength(1);
+
+              const embedSpan = container.items[0];
+              expect(embedSpan).toBeDefined();
+              expect(embedSpan!.attributes['sentry.op'].value).toBe('gen_ai.embeddings');
+              expect(embedSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('mock-model-id');
+              expect(embedSpan!.name).toBe('embeddings mock-model-id');
+            },
+          })
+          .start()
+          .completed();
+      });
+    },
+    {
+      additionalDependencies: {
+        ai: '^7.0.0-canary',
+      },
+    },
+  );
+
+  createEsmAndCjsTests(
+    __dirname,
+    'scenario-disabled.mjs',
+    'instrument.mjs',
+    (createRunner, test) => {
+      test('does not create spans when telemetry is disabled', async () => {
+        await createRunner()
+          .expect({
+            transaction: transaction => {
+              expect(transaction.transaction).toBe('main');
+              expect(transaction.spans).toHaveLength(0);
+            },
+          })
+          .start()
+          .completed();
+      });
+    },
+    {
+      additionalDependencies: {
+        ai: '^7.0.0-canary',
+      },
+    },
+  );
+});
diff --git a/packages/node/src/integrations/tracing/vercelai/dc-handlers.ts b/packages/node/src/integrations/tracing/vercelai/dc-handlers.ts
new file mode 100644
index 000000000000..1e48c9775295
--- /dev/null
+++ b/packages/node/src/integrations/tracing/vercelai/dc-handlers.ts
@@ -0,0 +1,316 @@
+import { subscribe } from 'node:diagnostics_channel';
+import {
+  captureException,
+  getClient,
+  SEMANTIC_ATTRIBUTE_SENTRY_OP,
+  SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN,
+  startInactiveSpan,
+  withScope,
+} from '@sentry/core';
+import type { Span } from '@sentry/core';
+import { INTEGRATION_NAME } from './constants';
+import { safeStringify } from './dc-utils';
+import { determineRecordingSettings } from './instrumentation';
+import type {
+  AiSdkContentPart,
+  AiSdkFinishReason,
+  AiSdkOperationId,
+  AiSdkToolCall,
+  AiSdkUsage,
+  VercelAiIntegration,
+} from './types';
+
+const ORIGIN = 'auto.vercelai.dc';
+
+interface CallState {
+  rootSpan: Span;
+  inferenceSpan?: Span;
+  toolSpans: Map<string, Span>;
+  recordInputs: boolean;
+  recordOutputs: boolean;
+}
+
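+// In-flight call state, keyed by the callId the AI SDK attaches to every telemetry event.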
+const callStates = new Map<string, CallState>();
+
+const OPERATION_NAME_MAP: Record<AiSdkOperationId, string> = {
+  'ai.generateText': 'invoke_agent',
+  'ai.streamText': 'invoke_agent',
+  'ai.generateObject': 'invoke_agent',
+  'ai.streamObject': 'invoke_agent',
+  'ai.embed': 'embeddings',
+  'ai.embedMany': 'embeddings',
+  'ai.rerank': 'rerank',
+};
+
+function mapOperationName(operationId: string): string {
+  return OPERATION_NAME_MAP[operationId as AiSdkOperationId] ?? operationId;
+}
+
+function getRecordingSettings(event: Record<string, unknown>): { recordInputs: boolean; recordOutputs: boolean } {
+  const client = getClient();
+  const integration = client?.getIntegrationByName<VercelAiIntegration>(INTEGRATION_NAME);
+  const defaultPii = integration ? Boolean(client?.getOptions().sendDefaultPii) : false;
+  return determineRecordingSettings(
+    integration?.options,
+    {
+      recordInputs: event.recordInputs as boolean | undefined,
+      recordOutputs: event.recordOutputs as boolean | undefined,
+    },
+    undefined,
+    defaultPii,
+  );
+}
+
+function setUsageAttributes(span: Span, u: AiSdkUsage): void {
+  if (u.inputTokens != null) span.setAttribute('gen_ai.usage.input_tokens', u.inputTokens);
+  if (u.outputTokens != null) span.setAttribute('gen_ai.usage.output_tokens', u.outputTokens);
+  if (u.inputTokens != null || u.outputTokens != null)
+    span.setAttribute('gen_ai.usage.total_tokens', (u.inputTokens ?? 0) + (u.outputTokens ?? 0));
+  if (u.inputTokenDetails?.cacheReadTokens != null)
+    span.setAttribute('gen_ai.usage.input_tokens.cached', u.inputTokenDetails.cacheReadTokens);
+  if (u.inputTokenDetails?.cacheWriteTokens != null)
+    span.setAttribute('gen_ai.usage.input_tokens.cache_write', u.inputTokenDetails.cacheWriteTokens);
+  if (u.outputTokenDetails?.reasoningTokens != null)
+    span.setAttribute('gen_ai.usage.output_tokens.reasoning', u.outputTokenDetails.reasoningTokens);
+}
+
+function normalizeFinishReason(reason: AiSdkFinishReason): string {
+  return reason === 'tool-calls' ? 'tool_call' : reason;
+}
+
+function buildOutputMessages(content: AiSdkContentPart[], finishReason: AiSdkFinishReason): string | undefined {
+  const parts: Record<string, unknown>[] = [];
+  const text = content
+    .filter(p => p.type === 'text' && p.text)
+    .map(p => p.text)
+    .join('');
+  if (text) parts.push({ type: 'text', content: text });
+  for (const tc of content.filter(p => p.type === 'tool-call')) {
+    const args = typeof tc.input === 'string' ? tc.input : safeStringify(tc.input ?? {});
+    parts.push({ type: 'tool_call', id: tc.toolCallId, name: tc.toolName, arguments: args });
+  }
+  if (parts.length === 0) return undefined;
+  return safeStringify([{ role: 'assistant', parts, finish_reason: normalizeFinishReason(finishReason) }]);
+}
+
+function formatInputMessages(messages: unknown[]): string | undefined {
+  return safeStringify(
+    messages.map((m: unknown) =>
+      m && typeof m === 'object' && 'role' in m ? m : { role: 'user', content: String(m) },
+    ),
+  );
+}
+
+export function handleOnStart(event: Record<string, unknown>): void {
+  const operationId = event.operationId as string;
+  const callId = event.callId as string;
+  const modelId = event.modelId as string;
+  const functionId = event.functionId as string | undefined;
+  const operationName = mapOperationName(operationId);
+  const { recordInputs, recordOutputs } = getRecordingSettings(event);
+
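+  // Prefer the user-supplied functionId (agent name) for the span name, falling back to the model id.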
+  const spanName = functionId ? `${operationName} ${functionId}` : `${operationName} ${modelId}`;
+  const attributes: Record<string, string> = {
+    [SEMANTIC_ATTRIBUTE_SENTRY_OP]: `gen_ai.${operationName}`,
+    [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: ORIGIN,
+    'gen_ai.operation.name': operationName,
+    'gen_ai.request.model': modelId,
+  };
+  if (functionId) attributes['gen_ai.agent.name'] = functionId;
+
+  if (recordInputs) {
+    const instructions = event.instructions as string | undefined;
+    if (instructions) {
+      const val = safeStringify([{ type: 'text', content: instructions }]);
+      if (val) attributes['gen_ai.system_instructions'] = val;
+    }
+    const messages = event.messages as unknown[] | undefined;
+    if (Array.isArray(messages)) {
+      const val = formatInputMessages(messages);
+      if (val) attributes['gen_ai.input.messages'] = val;
+    }
+  }
+
+  const rootSpan = startInactiveSpan({ name: spanName, attributes });
+  callStates.set(callId, { rootSpan, toolSpans: new Map(), recordInputs, recordOutputs });
+}
+
+export function handleOnLanguageModelCallStart(event: Record<string, unknown>): void {
+  const state = callStates.get(event.callId as string);
+  if (!state) return;
+
+  const modelId = event.modelId as string;
+  const attributes: Record<string, string> = {
+    [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
+    [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: ORIGIN,
+    'gen_ai.operation.name': 'generate_content',
+    'gen_ai.request.model': modelId,
+    'gen_ai.system': event.provider as string,
+  };
+  if (state.recordInputs) {
+    const messages = event.messages as unknown[] | undefined;
+    if (Array.isArray(messages)) {
+      const val = formatInputMessages(messages);
+      if (val) attributes['gen_ai.input.messages'] = val;
+    }
+    const tools = event.tools as unknown[] | undefined;
+    if (Array.isArray(tools)) {
+      const val = safeStringify(tools);
+      if (val) attributes['gen_ai.request.available_tools'] = val;
+    }
+  }
+  state.inferenceSpan = startInactiveSpan({ name: `generate_content ${modelId}`, attributes });
+}
+
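+// Closes the generate_content span, recording usage, finish reason, response id and (if recording outputs) messages.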
+export function handleOnLanguageModelCallEnd(event: Record<string, unknown>): void {
+  const state = callStates.get(event.callId as string);
+  if (!state?.inferenceSpan) return;
+
+  const usage = event.usage as AiSdkUsage | undefined;
+  if (usage) setUsageAttributes(state.inferenceSpan, usage);
+  const finishReason = event.finishReason as AiSdkFinishReason | undefined;
+  if (finishReason) {
+    const val = safeStringify([normalizeFinishReason(finishReason)]);
+    if (val) state.inferenceSpan.setAttribute('gen_ai.response.finish_reasons', val);
+  }
+  if (event.responseId) state.inferenceSpan.setAttribute('gen_ai.response.id', event.responseId as string);
+
+  if (state.recordOutputs) {
+    const content = event.content as AiSdkContentPart[] | undefined;
+    if (Array.isArray(content)) {
+      const out = buildOutputMessages(content, finishReason ?? 'stop');
+      if (out) state.inferenceSpan.setAttribute('gen_ai.output.messages', out);
+    }
+  }
+  state.inferenceSpan.end();
+  state.inferenceSpan = undefined;
+}
+
+export function handleOnToolExecutionStart(event: Record<string, unknown>): void {
+  const state = callStates.get(event.callId as string);
+  if (!state) return;
+  const toolCall = event.toolCall as AiSdkToolCall;
+  if (!toolCall) return;
+
+  const attributes: Record<string, string> = {
+    [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
+    [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: ORIGIN,
+    'gen_ai.operation.name': 'execute_tool',
+    'gen_ai.tool.name': toolCall.toolName,
+    'gen_ai.tool.call.id': toolCall.toolCallId,
+    'gen_ai.tool.type': 'function',
+  };
+  if (state.recordInputs && toolCall.input != null) {
+    const val = typeof toolCall.input === 'string' ? toolCall.input : safeStringify(toolCall.input);
+    if (val) attributes['gen_ai.tool.input'] = val;
+  }
+  state.toolSpans.set(
+    toolCall.toolCallId,
+    startInactiveSpan({ name: `execute_tool ${toolCall.toolName}`, attributes }),
+  );
+}
+
+export function handleOnToolExecutionEnd(event: Record<string, unknown>): void {
+  const state = callStates.get(event.callId as string);
+  if (!state) return;
+  const toolCall = event.toolCall as AiSdkToolCall;
+  if (!toolCall) return;
+  const toolSpan = state.toolSpans.get(toolCall.toolCallId);
+  if (!toolSpan) return;
+
+  const toolOutput = event.toolOutput as { type: string; output?: unknown; error?: Error } | undefined;
+  if (toolOutput?.type === 'tool-result' && state.recordOutputs && toolOutput.output != null) {
+    const val = safeStringify(toolOutput.output);
+    if (val) toolSpan.setAttribute('gen_ai.tool.output', val);
+  } else if (toolOutput?.type === 'tool-error' && toolOutput.error) {
+    toolSpan.setStatus({ code: 2, message: toolOutput.error.message });
+    withScope(scope => {
+      scope.setTag('vercel.ai.tool.name', toolCall.toolName);
+      scope.setTag('vercel.ai.tool.callId', toolCall.toolCallId);
+      scope.setLevel('error');
+      captureException(toolOutput.error, { mechanism: { type: 'auto.vercelai.dc', handled: false } });
+    });
+  }
+  toolSpan.end();
+  state.toolSpans.delete(toolCall.toolCallId);
+}
+
+export function handleOnEnd(event: Record<string, unknown>): void {
+  const state = callStates.get(event.callId as string);
+  if (!state) return;
+
+  const usage = (event.totalUsage ?? event.usage) as AiSdkUsage | undefined;
+  if (usage) setUsageAttributes(state.rootSpan, usage);
+  const finishReason = event.finishReason as AiSdkFinishReason | undefined;
+  if (finishReason) {
+    const val = safeStringify([normalizeFinishReason(finishReason)]);
+    if (val) state.rootSpan.setAttribute('gen_ai.response.finish_reasons', val);
+  }
+  if (state.recordOutputs) {
+    const content: AiSdkContentPart[] = [];
+    const text = event.text as string | undefined;
+    if (text) content.push({ type: 'text', text });
+    for (const tc of (event.toolCalls as AiSdkToolCall[] | undefined) ?? []) {
+      content.push({ type: 'tool-call', toolCallId: tc.toolCallId, toolName: tc.toolName, input: tc.input });
+    }
+    const out = buildOutputMessages(content, finishReason ?? 'stop');
+    if (out) state.rootSpan.setAttribute('gen_ai.output.messages', out);
+  }
+  for (const [, s] of state.toolSpans) s.end();
+  state.toolSpans.clear();
+  if (state.inferenceSpan) state.inferenceSpan.end();
+  state.rootSpan.end();
+  callStates.delete(event.callId as string);
+}
+
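+// On error, end every span that is still open so the failed call does not leak unfinished spans.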
+export function handleOnError(event: Record<string, unknown>): void {
+  const state = callStates.get(event.callId as string);
+  if (!state) return;
+  const error = event.error instanceof Error ? event.error : undefined;
+  const endWithError = (span: Span): void => {
+    span.setStatus({ code: 2, message: error?.message ?? 'unknown error' });
+    span.end();
+  };
+  for (const [, s] of state.toolSpans) endWithError(s);
+  state.toolSpans.clear();
+  if (state.inferenceSpan) endWithError(state.inferenceSpan);
+  endWithError(state.rootSpan);
+  callStates.delete(event.callId as string);
+}
+
+const DC_CHANNEL = 'aisdk:telemetry';
+
+const DC_HANDLERS: Record<string, (event: Record<string, unknown>) => void> = {
+  onStart: handleOnStart,
+  onLanguageModelCallStart: handleOnLanguageModelCallStart,
+  onLanguageModelCallEnd: handleOnLanguageModelCallEnd,
+  onToolExecutionStart: handleOnToolExecutionStart,
+  onToolExecutionEnd: handleOnToolExecutionEnd,
+  onEnd: handleOnEnd,
+  onError: handleOnError,
+};
+
+let subscribed = false;
+
+/** Subscribe to AI SDK v7+ diagnostic channel. Inert on v3-v6. */
+export function subscribeAiSdkDiagnosticChannel(): void {
+  if (subscribed) return;
+  subscribed = true;
+
+  try {
+    subscribe(DC_CHANNEL, (message: unknown) => {
+      const msg = message as { type: string; event: Record<string, unknown> };
+      try {
+        DC_HANDLERS[msg?.type]?.(msg.event);
+      } catch {
+        // Never let telemetry processing break the application
+      }
+    });
+  } catch {
+    // subscribe may not be available on all runtimes
+  }
+}
diff --git a/packages/node/src/integrations/tracing/vercelai/dc-utils.ts b/packages/node/src/integrations/tracing/vercelai/dc-utils.ts
new file mode 100644
index 000000000000..ee380886c52d
--- /dev/null
+++ b/packages/node/src/integrations/tracing/vercelai/dc-utils.ts
@@ -0,0 +1,8 @@
+/** JSON.stringify that returns undefined instead of throwing, e.g. on circular values. */
+export function safeStringify(value: unknown): string | undefined {
+  try {
+    return JSON.stringify(value);
+  } catch {
+    return undefined;
+  }
+}
diff --git a/packages/node/src/integrations/tracing/vercelai/index.ts b/packages/node/src/integrations/tracing/vercelai/index.ts
index a0b3f3126d01..b7b97398dea5 100644
--- a/packages/node/src/integrations/tracing/vercelai/index.ts
+++ b/packages/node/src/integrations/tracing/vercelai/index.ts
@@ -2,6 +2,7 @@ import type { Client, IntegrationFn } from '@sentry/core';
 import { addVercelAiProcessors, defineIntegration } from '@sentry/core';
 import { generateInstrumentOnce, type modulesIntegration } from '@sentry/node-core';
 import { INTEGRATION_NAME } from './constants';
+import { subscribeAiSdkDiagnosticChannel } from './dc-handlers';
 import { SentryVercelAiInstrumentation } from './instrumentation';
 import type { VercelAiOptions } from './types';
 
@@ -35,6 +36,10 @@ const _vercelAIIntegration = ((options: VercelAiOptions = {}) => {
       } else {
         instrumentation?.callWhenPatched(() => addVercelAiProcessors(client));
       }
+
+      // AI SDK v7+ publishes telemetry events to node:diagnostics_channel.
+      // On v3-v6 the channel is never published to, so this is inert.
+      subscribeAiSdkDiagnosticChannel();
     },
   };
 }) satisfies IntegrationFn;
diff --git a/packages/node/src/integrations/tracing/vercelai/types.ts b/packages/node/src/integrations/tracing/vercelai/types.ts
index 624212f9f7bd..a5addad6a79b 100644
--- a/packages/node/src/integrations/tracing/vercelai/types.ts
+++ b/packages/node/src/integrations/tracing/vercelai/types.ts
@@ -73,3 +73,39 @@ export interface VercelAiIntegration extends Integration {
   options: VercelAiOptions;
 }
+
+// -- Diagnostic channel event types --
+// Shapes derived from the AI SDK telemetry events.
+// See: https://github.com/vercel/ai/blob/main/packages/ai/core/telemetry
+
+export type AiSdkOperationId =
+  | 'ai.generateText'
+  | 'ai.streamText'
+  | 'ai.generateObject'
+  | 'ai.streamObject'
+  | 'ai.embed'
+  | 'ai.embedMany'
+  | 'ai.rerank';
+
+export type AiSdkFinishReason = 'stop' | 'length' | 'content-filter' | 'tool-calls' | 'error' | 'other' | 'unknown';
+
+export interface AiSdkUsage {
+  inputTokens?: number;
+  outputTokens?: number;
+  inputTokenDetails?: { cacheReadTokens?: number; cacheWriteTokens?: number };
+  outputTokenDetails?: { reasoningTokens?: number };
+}
+
+export interface AiSdkContentPart {
+  type: string;
+  text?: string;
+  toolCallId?: string;
+  toolName?: string;
+  input?: unknown;
+}
+
+export interface AiSdkToolCall {
+  toolCallId: string;
+  toolName: string;
+  input?: unknown;
+}