Skip to content

Commit cf9559a

Browse files
committed
feat(core): Add streamGenAiSpans option to stream gen_ai spans
Adds a new experimental `streamGenAiSpans` option that controls how `gen_ai` spans are sent to Sentry. When enabled, the SDK extracts all `gen_ai` spans out of a transaction and sends them as v2 envelope items. This avoids the payload size limits of regular transactions. ```ts Sentry.init({ dsn: "https://examplePublicKey@o0.ingest.sentry.io/0", _experiments: { streamGenAiSpans: true, }, }); ``` Closes: #20170
1 parent e9791d3 commit cf9559a

88 files changed

Lines changed: 5395 additions & 5667 deletions

File tree

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

CHANGELOG.md

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,6 +6,25 @@
66

77
Work in this release was contributed by @dmmulroy. Thank you for your contribution!
88

9+
### Important Changes
10+
11+
- **feat(core): Add `streamGenAiSpans` option to stream gen_ai spans ([#20785](https://github.com/getsentry/sentry-javascript/pull/20785))**
12+
13+
Adds a new experimental `streamGenAiSpans` option that controls how `gen_ai` spans are
14+
sent to Sentry. When set, the SDK extracts all `gen_ai` spans out of a
15+
transaction and sends them as v2 envelope items.
16+
17+
This avoids payload size limits of regular transactions.
18+
19+
```ts
20+
Sentry.init({
21+
dsn: 'https://examplePublicKey@o0.ingest.sentry.io/0',
22+
_experiments: {
23+
streamGenAiSpans: true,
24+
},
25+
});
26+
```
27+
928
## 10.52.0
1029

1130
### Important Changes

dev-packages/cloudflare-integration-tests/suites/tracing/anthropic-ai/index.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@ export default Sentry.withSentry(
1616
(env: Env) => ({
1717
dsn: env.SENTRY_DSN,
1818
tracesSampleRate: 1.0,
19+
_experiments: { streamGenAiSpans: true },
1920
}),
2021
{
2122
async fetch(_request, _env, _ctx) {

dev-packages/cloudflare-integration-tests/suites/tracing/anthropic-ai/test.ts

Lines changed: 28 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,3 @@
1-
import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
21
import { expect, it } from 'vitest';
32
import {
43
GEN_AI_OPERATION_NAME_ATTRIBUTE,
@@ -21,30 +20,36 @@ it('traces a basic message creation request', async ({ signal }) => {
2120
const runner = createRunner(__dirname)
2221
.ignore('event')
2322
.expect(envelope => {
23+
// Transaction item (first item in envelope)
2424
const transactionEvent = envelope[1]?.[0]?.[1] as any;
25-
2625
expect(transactionEvent.transaction).toBe('GET /');
27-
expect(transactionEvent.spans).toEqual(
28-
expect.arrayContaining([
29-
expect.objectContaining({
30-
data: expect.objectContaining({
31-
[GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
32-
[SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
33-
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
34-
[GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
35-
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
36-
[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
37-
[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
38-
[GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'msg_mock123',
39-
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
40-
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
41-
}),
42-
description: 'chat claude-3-haiku-20240307',
43-
op: 'gen_ai.chat',
44-
origin: 'auto.ai.anthropic',
45-
}),
46-
]),
47-
);
26+
27+
// Span container item (second item in same envelope)
28+
const container = envelope[1]?.[1]?.[1] as any;
29+
expect(container).toBeDefined();
30+
31+
expect(container.items).toHaveLength(1);
32+
const [firstSpan] = container.items;
33+
34+
// [0] chat claude-3-haiku-20240307
35+
expect(firstSpan!.name).toBe('chat claude-3-haiku-20240307');
36+
expect(firstSpan!.status).toBe('ok');
37+
expect(firstSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({ type: 'string', value: 'chat' });
38+
expect(firstSpan!.attributes['sentry.op']).toEqual({ type: 'string', value: 'gen_ai.chat' });
39+
expect(firstSpan!.attributes['sentry.origin']).toEqual({ type: 'string', value: 'auto.ai.anthropic' });
40+
expect(firstSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({ type: 'string', value: 'anthropic' });
41+
expect(firstSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
42+
type: 'string',
43+
value: 'claude-3-haiku-20240307',
44+
});
45+
expect(firstSpan!.attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]).toEqual({ type: 'double', value: 0.7 });
46+
expect(firstSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
47+
type: 'string',
48+
value: 'claude-3-haiku-20240307',
49+
});
50+
expect(firstSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]).toEqual({ type: 'string', value: 'msg_mock123' });
51+
expect(firstSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({ type: 'integer', value: 10 });
52+
expect(firstSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]).toEqual({ type: 'integer', value: 15 });
4853
})
4954
.start(signal);
5055
await runner.makeRequest('get', '/');

dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/index.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -16,6 +16,7 @@ export default Sentry.withSentry(
1616
(env: Env) => ({
1717
dsn: env.SENTRY_DSN,
1818
tracesSampleRate: 1.0,
19+
_experiments: { streamGenAiSpans: true },
1920
}),
2021
{
2122
async fetch(_request, _env, _ctx) {
Lines changed: 57 additions & 56 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,3 @@
1-
import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
21
import { expect, it } from 'vitest';
32
import {
43
GEN_AI_OPERATION_NAME_ATTRIBUTE,
@@ -18,67 +17,69 @@ import { createRunner } from '../../../runner';
1817
// want to test that the instrumentation does not break in our
1918
// cloudflare SDK.
2019

21-
it('traces Google GenAI chat creation and message sending', async () => {
20+
it('traces Google GenAI chat creation and message sending', async ({ signal }) => {
2221
const runner = createRunner(__dirname)
2322
.ignore('event')
2423
.expect(envelope => {
24+
// Transaction item (first item in envelope)
2525
const transactionEvent = envelope[1]?.[0]?.[1] as any;
26-
2726
expect(transactionEvent.transaction).toBe('GET /');
28-
expect(transactionEvent.spans).toEqual(
29-
expect.arrayContaining([
30-
// chat.sendMessage
31-
expect.objectContaining({
32-
data: expect.objectContaining({
33-
[GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
34-
[SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
35-
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
36-
[GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
37-
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
38-
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
39-
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
40-
[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
41-
}),
42-
description: 'chat gemini-1.5-pro',
43-
op: 'gen_ai.chat',
44-
origin: 'auto.ai.google_genai',
45-
}),
46-
// models.generateContent
47-
expect.objectContaining({
48-
data: expect.objectContaining({
49-
[GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
50-
[SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
51-
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
52-
[GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
53-
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash',
54-
[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
55-
[GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9,
56-
[GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
57-
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
58-
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
59-
[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
60-
}),
61-
description: 'generate_content gemini-1.5-flash',
62-
op: 'gen_ai.generate_content',
63-
origin: 'auto.ai.google_genai',
64-
}),
65-
// models.embedContent
66-
expect.objectContaining({
67-
data: expect.objectContaining({
68-
[GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
69-
[SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
70-
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
71-
[GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
72-
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'text-embedding-004',
73-
}),
74-
description: 'embeddings text-embedding-004',
75-
op: 'gen_ai.embeddings',
76-
origin: 'auto.ai.google_genai',
77-
}),
78-
]),
79-
);
27+
28+
// Span container item (second item in same envelope)
29+
const container = envelope[1]?.[1]?.[1] as any;
30+
expect(container).toBeDefined();
31+
expect(container.items).toHaveLength(3);
32+
const [firstSpan, secondSpan, thirdSpan] = container.items;
33+
34+
// [0] chat gemini-1.5-pro
35+
expect(firstSpan!.name).toBe('chat gemini-1.5-pro');
36+
expect(firstSpan!.status).toBe('ok');
37+
expect(firstSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({ type: 'string', value: 'chat' });
38+
expect(firstSpan!.attributes['sentry.op']).toEqual({ type: 'string', value: 'gen_ai.chat' });
39+
expect(firstSpan!.attributes['sentry.origin']).toEqual({ type: 'string', value: 'auto.ai.google_genai' });
40+
expect(firstSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({ type: 'string', value: 'google_genai' });
41+
expect(firstSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
42+
type: 'string',
43+
value: 'gemini-1.5-pro',
44+
});
45+
expect(firstSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({ type: 'integer', value: 8 });
46+
expect(firstSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]).toEqual({ type: 'integer', value: 12 });
47+
expect(firstSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({ type: 'integer', value: 20 });
48+
49+
// [1] generate_content gemini-1.5-flash
50+
expect(secondSpan!.name).toBe('generate_content gemini-1.5-flash');
51+
expect(secondSpan!.status).toBe('ok');
52+
expect(secondSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
53+
type: 'string',
54+
value: 'generate_content',
55+
});
56+
expect(secondSpan!.attributes['sentry.op']).toEqual({ type: 'string', value: 'gen_ai.generate_content' });
57+
expect(secondSpan!.attributes['sentry.origin']).toEqual({ type: 'string', value: 'auto.ai.google_genai' });
58+
expect(secondSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({ type: 'string', value: 'google_genai' });
59+
expect(secondSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
60+
type: 'string',
61+
value: 'gemini-1.5-flash',
62+
});
63+
expect(secondSpan!.attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]).toEqual({ type: 'double', value: 0.7 });
64+
expect(secondSpan!.attributes[GEN_AI_REQUEST_TOP_P_ATTRIBUTE]).toEqual({ type: 'double', value: 0.9 });
65+
expect(secondSpan!.attributes[GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]).toEqual({ type: 'integer', value: 100 });
66+
expect(secondSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({ type: 'integer', value: 8 });
67+
expect(secondSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]).toEqual({ type: 'integer', value: 12 });
68+
expect(secondSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({ type: 'integer', value: 20 });
69+
70+
// [2] embeddings text-embedding-004
71+
expect(thirdSpan!.name).toBe('embeddings text-embedding-004');
72+
expect(thirdSpan!.status).toBe('ok');
73+
expect(thirdSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({ type: 'string', value: 'embeddings' });
74+
expect(thirdSpan!.attributes['sentry.op']).toEqual({ type: 'string', value: 'gen_ai.embeddings' });
75+
expect(thirdSpan!.attributes['sentry.origin']).toEqual({ type: 'string', value: 'auto.ai.google_genai' });
76+
expect(thirdSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({ type: 'string', value: 'google_genai' });
77+
expect(thirdSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
78+
type: 'string',
79+
value: 'text-embedding-004',
80+
});
8081
})
81-
.start();
82+
.start(signal);
8283
await runner.makeRequest('get', '/');
8384
await runner.completed();
8485
});

dev-packages/cloudflare-integration-tests/suites/tracing/langchain/index.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@ export default Sentry.withSentry(
99
(env: Env) => ({
1010
dsn: env.SENTRY_DSN,
1111
tracesSampleRate: 1.0,
12+
_experiments: { streamGenAiSpans: true },
1213
}),
1314
{
1415
async fetch(_request, _env, _ctx) {

dev-packages/cloudflare-integration-tests/suites/tracing/langchain/test.ts

Lines changed: 38 additions & 46 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,3 @@
1-
import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
21
import { expect, it } from 'vitest';
32
import {
43
GEN_AI_OPERATION_NAME_ATTRIBUTE,
@@ -22,53 +21,46 @@ it('traces langchain chat model, chain, and tool invocations', async ({ signal }
2221
const runner = createRunner(__dirname)
2322
.ignore('event')
2423
.expect(envelope => {
24+
// Transaction item (first item in envelope)
2525
const transactionEvent = envelope[1]?.[0]?.[1] as any;
26-
2726
expect(transactionEvent.transaction).toBe('GET /');
28-
expect(transactionEvent.spans).toEqual(
29-
expect.arrayContaining([
30-
// Chat model span
31-
expect.objectContaining({
32-
data: expect.objectContaining({
33-
[GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
34-
[SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
35-
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
36-
[GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
37-
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022',
38-
[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
39-
[GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
40-
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
41-
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
42-
[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
43-
}),
44-
description: 'chat claude-3-5-sonnet-20241022',
45-
op: 'gen_ai.chat',
46-
origin: 'auto.ai.langchain',
47-
}),
48-
// Chain span
49-
expect.objectContaining({
50-
data: expect.objectContaining({
51-
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
52-
[SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
53-
'langchain.chain.name': 'my_test_chain',
54-
}),
55-
description: 'chain my_test_chain',
56-
op: 'gen_ai.invoke_agent',
57-
origin: 'auto.ai.langchain',
58-
}),
59-
// Tool span
60-
expect.objectContaining({
61-
data: expect.objectContaining({
62-
[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
63-
[SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
64-
[GEN_AI_TOOL_NAME_ATTRIBUTE]: 'search_tool',
65-
}),
66-
description: 'execute_tool search_tool',
67-
op: 'gen_ai.execute_tool',
68-
origin: 'auto.ai.langchain',
69-
}),
70-
]),
71-
);
27+
28+
// Span container item (second item in same envelope)
29+
const container = envelope[1]?.[1]?.[1] as any;
30+
expect(container).toBeDefined();
31+
expect(container.items).toHaveLength(3);
32+
const [firstSpan, secondSpan, thirdSpan] = container.items;
33+
34+
// [0] chat claude-3-5-sonnet-20241022
35+
expect(firstSpan!.name).toBe('chat claude-3-5-sonnet-20241022');
36+
expect(firstSpan!.status).toBe('ok');
37+
expect(firstSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({ type: 'string', value: 'chat' });
38+
expect(firstSpan!.attributes['sentry.op']).toEqual({ type: 'string', value: 'gen_ai.chat' });
39+
expect(firstSpan!.attributes['sentry.origin']).toEqual({ type: 'string', value: 'auto.ai.langchain' });
40+
expect(firstSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({ type: 'string', value: 'anthropic' });
41+
expect(firstSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
42+
type: 'string',
43+
value: 'claude-3-5-sonnet-20241022',
44+
});
45+
expect(firstSpan!.attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]).toEqual({ type: 'double', value: 0.7 });
46+
expect(firstSpan!.attributes[GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]).toEqual({ type: 'integer', value: 100 });
47+
expect(firstSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({ type: 'integer', value: 10 });
48+
expect(firstSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]).toEqual({ type: 'integer', value: 15 });
49+
expect(firstSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({ type: 'integer', value: 25 });
50+
51+
// [1] chain my_test_chain
52+
expect(secondSpan!.name).toBe('chain my_test_chain');
53+
expect(secondSpan!.status).toBe('ok');
54+
expect(secondSpan!.attributes['sentry.origin']).toEqual({ type: 'string', value: 'auto.ai.langchain' });
55+
expect(secondSpan!.attributes['sentry.op']).toEqual({ type: 'string', value: 'gen_ai.invoke_agent' });
56+
expect(secondSpan!.attributes['langchain.chain.name']).toEqual({ type: 'string', value: 'my_test_chain' });
57+
58+
// [2] execute_tool search_tool
59+
expect(thirdSpan!.name).toBe('execute_tool search_tool');
60+
expect(thirdSpan!.status).toBe('ok');
61+
expect(thirdSpan!.attributes['sentry.origin']).toEqual({ type: 'string', value: 'auto.ai.langchain' });
62+
expect(thirdSpan!.attributes['sentry.op']).toEqual({ type: 'string', value: 'gen_ai.execute_tool' });
63+
expect(thirdSpan!.attributes[GEN_AI_TOOL_NAME_ATTRIBUTE]).toEqual({ type: 'string', value: 'search_tool' });
7264
})
7365
.start(signal);
7466
await runner.makeRequest('get', '/');

dev-packages/cloudflare-integration-tests/suites/tracing/langgraph/index.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@ export default Sentry.withSentry(
1010
dsn: env.SENTRY_DSN,
1111
tracesSampleRate: 1.0,
1212
sendDefaultPii: true,
13+
_experiments: { streamGenAiSpans: true },
1314
}),
1415
{
1516
async fetch(_request, _env, _ctx) {

0 commit comments

Comments
 (0)