diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index daaf5effc2a0..2d7781c5f43f 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -485,13 +485,6 @@ jobs:
run: yarn test:ci:browser
if: github.event_name != 'pull_request'
- - name: Parse and Upload Coverage
- if: cancelled() == false
- continue-on-error: true
- uses: getsentry/codecov-action@main
- with:
- token: ${{ secrets.GITHUB_TOKEN }}
-
job_bun_unit_tests:
name: Bun Unit Tests
needs: [job_get_metadata, job_build]
@@ -554,7 +547,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- node: [18, 20, 22, 24]
+ node: [18, 20, 22, 24, 26]
steps:
- name: Check out base commit (${{ github.event.pull_request.base.sha }})
uses: actions/checkout@v6
@@ -586,14 +579,6 @@ jobs:
env:
NODE_VERSION: ${{ matrix.node }}
- - name: Parse and Upload Coverage
- if: cancelled() == false
- continue-on-error: true
- uses: getsentry/codecov-action@main
- with:
- token: ${{ secrets.GITHUB_TOKEN }}
- name: ${{ matrix.node }}
-
job_browser_playwright_tests:
name:
Playwright ${{ matrix.bundle }}${{ matrix.project && matrix.project != 'chromium' && format(' {0}',
@@ -693,18 +678,6 @@ jobs:
overwrite: true
retention-days: 7
- - name: Parse and Upload Coverage
- if: cancelled() == false
- continue-on-error: true
- uses: getsentry/codecov-action@main
- with:
- token: ${{ secrets.GITHUB_TOKEN }}
- directory: dev-packages/browser-integration-tests
- enable-coverage: false
- name:
- browser-playwright-${{ matrix.bundle }}-${{ matrix.project }}${{ matrix.shard && format('-{0}',
- matrix.shard) || '' }}
-
job_browser_loader_tests:
name: PW ${{ matrix.bundle }} Tests
needs: [job_get_metadata, job_build, job_build_bundles]
@@ -764,16 +737,6 @@ jobs:
overwrite: true
retention-days: 7
- - name: Parse and Upload Coverage
- uses: getsentry/codecov-action@main
- if: cancelled() == false
- continue-on-error: true
- with:
- token: ${{ secrets.GITHUB_TOKEN }}
- directory: dev-packages/browser-integration-tests
- enable-coverage: false
- name: browser-loader-${{ matrix.bundle }}
-
job_check_for_faulty_dts:
name: Check for faulty .d.ts files
needs: [job_get_metadata, job_build]
@@ -810,7 +773,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- node: [18, 20, 22, 24]
+ node: [18, 20, 22, 24, 26]
typescript:
- false
include:
@@ -851,7 +814,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- node: [18, 20, 22, 24]
+ node: [18, 20, 22, 24, 26]
typescript:
- false
include:
@@ -939,7 +902,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- node: [18, 20, 22, 24]
+ node: [18, 20, 22, 24, 26]
steps:
- name: Check out current commit (${{ needs.job_get_metadata.outputs.commit_label }})
uses: actions/checkout@v6
diff --git a/.size-limit.js b/.size-limit.js
index c1be177bc5a2..2c0830fee349 100644
--- a/.size-limit.js
+++ b/.size-limit.js
@@ -72,7 +72,7 @@ module.exports = [
path: 'packages/browser/build/npm/esm/prod/index.js',
import: createImport('init', 'browserTracingIntegration', 'replayIntegration'),
gzip: true,
- limit: '78 KB',
+ limit: '79 KB',
disablePlugins: ['@size-limit/esbuild'],
modifyWebpackConfig: function (config) {
const webpack = require('webpack');
@@ -120,7 +120,7 @@ module.exports = [
path: 'packages/browser/build/npm/esm/prod/index.js',
import: createImport('init', 'sendFeedback'),
gzip: true,
- limit: '36 KB',
+ limit: '37 KB',
disablePlugins: ['@size-limit/esbuild'],
},
{
@@ -162,7 +162,7 @@ module.exports = [
import: createImport('init', 'ErrorBoundary'),
ignore: ['react/jsx-runtime'],
gzip: true,
- limit: '33 KB',
+ limit: '34 KB',
disablePlugins: ['@size-limit/esbuild'],
},
{
@@ -188,7 +188,7 @@ module.exports = [
path: 'packages/vue/build/esm/index.js',
import: createImport('init', 'browserTracingIntegration'),
gzip: true,
- limit: '51 KB',
+ limit: '52 KB',
disablePlugins: ['@size-limit/esbuild'],
},
// Svelte SDK (ESM)
@@ -261,7 +261,7 @@ module.exports = [
name: 'CDN Bundle (incl. Tracing, Replay, Feedback, Logs, Metrics)',
path: createCDNPath('bundle.tracing.replay.feedback.logs.metrics.min.js'),
gzip: true,
- limit: '96 KB',
+ limit: '97 KB',
disablePlugins: ['@size-limit/esbuild'],
},
// browser CDN bundles (non-gzipped)
@@ -286,7 +286,7 @@ module.exports = [
path: createCDNPath('bundle.logs.metrics.min.js'),
gzip: false,
brotli: false,
- limit: '94 KB',
+ limit: '95 KB',
disablePlugins: ['@size-limit/esbuild'],
},
{
@@ -302,7 +302,7 @@ module.exports = [
path: createCDNPath('bundle.replay.logs.metrics.min.js'),
gzip: false,
brotli: false,
- limit: '218 KB',
+ limit: '219 KB',
disablePlugins: ['@size-limit/esbuild'],
},
{
@@ -310,7 +310,7 @@ module.exports = [
path: createCDNPath('bundle.tracing.replay.min.js'),
gzip: false,
brotli: false,
- limit: '263 KB',
+ limit: '264 KB',
disablePlugins: ['@size-limit/esbuild'],
},
{
@@ -326,7 +326,7 @@ module.exports = [
path: createCDNPath('bundle.tracing.replay.feedback.min.js'),
gzip: false,
brotli: false,
- limit: '277 KB',
+ limit: '278 KB',
disablePlugins: ['@size-limit/esbuild'],
},
{
@@ -364,7 +364,7 @@ module.exports = [
import: createImport('init'),
ignore: [...builtinModules, ...nodePrefixedBuiltinModules],
gzip: true,
- limit: '65 KB',
+ limit: '66 KB',
disablePlugins: ['@size-limit/esbuild'],
},
// Node SDK (ESM)
@@ -374,7 +374,7 @@ module.exports = [
import: createImport('init'),
ignore: [...builtinModules, ...nodePrefixedBuiltinModules],
gzip: true,
- limit: '176 KB',
+ limit: '171 KB',
disablePlugins: ['@size-limit/esbuild'],
},
{
@@ -382,7 +382,7 @@ module.exports = [
path: 'packages/node/build/esm/index.js',
import: createImport('initWithoutDefaultIntegrations', 'getDefaultIntegrationsWithoutPerformance'),
gzip: true,
- limit: '102 KB',
+ limit: '79 KB',
disablePlugins: ['@size-limit/esbuild'],
ignore: [...builtinModules, ...nodePrefixedBuiltinModules],
modifyWebpackConfig: function (config) {
@@ -406,7 +406,7 @@ module.exports = [
import: createImport('init'),
ignore: [...builtinModules, ...nodePrefixedBuiltinModules],
gzip: true,
- limit: '119 KB',
+ limit: '113 KB',
disablePlugins: ['@size-limit/esbuild'],
},
// Cloudflare SDK (ESM) - compressed, minified to match `wrangler deploy --dry-run --minify` output
@@ -417,7 +417,7 @@ module.exports = [
ignore: [...builtinModules, ...nodePrefixedBuiltinModules],
gzip: false,
brotli: false,
- limit: '167 KiB',
+ limit: '171 KiB',
disablePlugins: ['@size-limit/webpack'],
webpack: false,
modifyEsbuildConfig: function (config) {
@@ -437,7 +437,7 @@ module.exports = [
ignore: [...builtinModules, ...nodePrefixedBuiltinModules],
gzip: false,
brotli: false,
- limit: '420 KiB',
+ limit: '423 KiB',
disablePlugins: ['@size-limit/webpack'],
webpack: false,
modifyEsbuildConfig: function (config) {
diff --git a/CHANGELOG.md b/CHANGELOG.md
index f23fe92ce89b..0e1bac4c474d 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,63 @@
- "You miss 100 percent of the chances you don't take. — Wayne Gretzky" — Michael Scott
+## 10.53.0
+
+### Important Changes
+
+- **feat(core): Add `streamGenAiSpans` option to stream gen_ai spans ([#20785](https://github.com/getsentry/sentry-javascript/pull/20785))**
+
+ Adds a new `streamGenAiSpans` option that controls how `gen_ai` spans are
+ sent to Sentry. When set, the SDK extracts all `gen_ai` spans out of a
+ transaction and sends them as v2 envelope items.
+
+ Enable this option if gen_ai spans are being dropped because the transaction payload exceeds size limits.
+
+ ```ts
+ Sentry.init({
+ dsn: 'https://examplePublicKey@o0.ingest.sentry.io/0',
+ streamGenAiSpans: true,
+ });
+ ```
+
+### Other Changes
+
+- feat(browser): Migrate browser profiling thread data to span attributes ([#20800](https://github.com/getsentry/sentry-javascript/pull/20800))
+- feat(core): Add `addConsoleInstrumentationFilter` utility ([#20790](https://github.com/getsentry/sentry-javascript/pull/20790))
+- feat(core): Add `applicationKey` to `BuildTimeOptionsBase` ([#20789](https://github.com/getsentry/sentry-javascript/pull/20789))
+- feat(core): split exports by browser/server for bundle size ([#20435](https://github.com/getsentry/sentry-javascript/pull/20435))
+- feat(nextjs): Add top-level `applicationKey` option ([#20794](https://github.com/getsentry/sentry-javascript/pull/20794))
+- feat(node): Support Node 26 ([#20710](https://github.com/getsentry/sentry-javascript/pull/20710))
+- feat(profiling-node): Bump `@sentry-internal/node-cpu-profiler` to 2.4.0 ([#20720](https://github.com/getsentry/sentry-javascript/pull/20720))
+- fix(cloudflare): avoid flush lock self-wait ([#20719](https://github.com/getsentry/sentry-javascript/pull/20719))
+- fix(hono): Capture transaction name on request for correct culprit ([#20801](https://github.com/getsentry/sentry-javascript/pull/20801))
+- fix(mcp): retroactively wrap handlers registered before wrapMcpServerWithSentry ([#20699](https://github.com/getsentry/sentry-javascript/pull/20699))
+- fix(node-core): Guard against undefined util.getSystemErrorMap ([#20660](https://github.com/getsentry/sentry-javascript/pull/20660))
+- fix(replay): Capture aborted/errored fetch requests in replay network tab ([#20722](https://github.com/getsentry/sentry-javascript/pull/20722))
+
+
+ Internal Changes
+
+- chore: bump replay dependencies ([#20746](https://github.com/getsentry/sentry-javascript/pull/20746))
+- chore: Typo intergation -> integration ([#20799](https://github.com/getsentry/sentry-javascript/pull/20799))
+- chore(deps): Bump @babel/plugin-transform-modules-systemjs from 7.24.1 to 7.29.4 ([#20773](https://github.com/getsentry/sentry-javascript/pull/20773))
+- chore(deps): Bump next from 15.5.15 to 15.5.18 in /dev-packages/e2e-tests/test-applications/nextjs-15 ([#20818](https://github.com/getsentry/sentry-javascript/pull/20818))
+- chore(deps): Bump next from 16.2.4 to 16.2.6 in /dev-packages/e2e-tests/test-applications/nextjs-16-streaming ([#20811](https://github.com/getsentry/sentry-javascript/pull/20811))
+- chore(deps): Bump rollup from 4.59.0 to 4.60.3 ([#20716](https://github.com/getsentry/sentry-javascript/pull/20716))
+- ci: Ensure PR reminder workflow considers new sub teams ([#20814](https://github.com/getsentry/sentry-javascript/pull/20814))
+- ci: Remove codecov reporting ([#20803](https://github.com/getsentry/sentry-javascript/pull/20803))
+- feat(deps): Bump bundler plugins to 5.3.0 ([#20820](https://github.com/getsentry/sentry-javascript/pull/20820))
+- feat(deps): Bump fast-uri from 3.0.6 to 3.1.2 ([#20774](https://github.com/getsentry/sentry-javascript/pull/20774))
+- feat(deps): Bump hono from 4.12.16 to 4.12.18 ([#20777](https://github.com/getsentry/sentry-javascript/pull/20777))
+- test(cloudflare-hono): fix 'occured' -> 'occurred' typo in error log ([#20783](https://github.com/getsentry/sentry-javascript/pull/20783))
+- test(deps): Bump hono from 4.12.14 to 4.12.16 ([#20712](https://github.com/getsentry/sentry-javascript/pull/20712))
+- test(deps): Bump hono from 4.12.14 to 4.12.18 in /dev-packages/e2e-tests/test-applications/cloudflare-hono ([#20776](https://github.com/getsentry/sentry-javascript/pull/20776))
+- test(e2e): Pin astro version in astro-6 test app ([#20709](https://github.com/getsentry/sentry-javascript/pull/20709))
+
+
+
+Work in this release was contributed by @dmmulroy and @SAY-5. Thank you for your contributions!
+
## 10.52.0
### Important Changes
diff --git a/codecov.yml b/codecov.yml
deleted file mode 100644
index ae6500091084..000000000000
--- a/codecov.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-comment: false
-
-coverage:
- status:
- project:
- default:
- enabled: false
- patch:
- default:
- enabled: false
- ignore:
- - 'packages/deno/**'
-
-config:
- files: changed
diff --git a/dev-packages/browser-integration-tests/package.json b/dev-packages/browser-integration-tests/package.json
index 574536fb83c5..cdab29e79ffd 100644
--- a/dev-packages/browser-integration-tests/package.json
+++ b/dev-packages/browser-integration-tests/package.json
@@ -59,7 +59,7 @@
"@babel/core": "^7.27.7",
"@babel/preset-typescript": "^7.16.7",
"@playwright/test": "~1.56.0",
- "@sentry-internal/rrweb": "2.34.0",
+ "@sentry-internal/rrweb": "2.42.0",
"@sentry/browser": "10.52.0",
"@sentry-internal/replay": "10.52.0",
"@sentry/opentelemetry": "10.52.0",
diff --git a/dev-packages/browser-integration-tests/suites/profiling/traceLifecycleMode_streamed-spans/subject.js b/dev-packages/browser-integration-tests/suites/profiling/traceLifecycleMode_streamed-spans/subject.js
new file mode 100644
index 000000000000..8ad2a8eac5af
--- /dev/null
+++ b/dev-packages/browser-integration-tests/suites/profiling/traceLifecycleMode_streamed-spans/subject.js
@@ -0,0 +1,37 @@
+import * as Sentry from '@sentry/browser';
+import { browserProfilingIntegration, spanStreamingIntegration } from '@sentry/browser';
+
+window.Sentry = Sentry;
+
+const client = Sentry.init({
+ dsn: 'https://public@dsn.ingest.sentry.io/1337',
+ integrations: [spanStreamingIntegration(), browserProfilingIntegration()],
+ tracesSampleRate: 1,
+ traceLifecycle: 'stream',
+ profileSessionSampleRate: 1,
+ profileLifecycle: 'trace',
+});
+
+function fibonacci(n) {
+ if (n <= 1) {
+ return n;
+ }
+ return fibonacci(n - 1) + fibonacci(n - 2);
+}
+
+await Sentry.startSpanManual({ name: 'root-fibonacci', parentSpan: null }, async span => {
+ fibonacci(40);
+
+ Sentry.startSpan({ name: 'child-span-1', parentSpan: span }, () => {
+ fibonacci(20);
+ });
+
+ Sentry.startSpan({ name: 'child-span-2', parentSpan: span }, () => {
+ fibonacci(20);
+ });
+
+ await new Promise(resolve => setTimeout(resolve, 40));
+ span.end();
+});
+
+await client?.flush(5000);
diff --git a/dev-packages/browser-integration-tests/suites/profiling/traceLifecycleMode_streamed-spans/test.ts b/dev-packages/browser-integration-tests/suites/profiling/traceLifecycleMode_streamed-spans/test.ts
new file mode 100644
index 000000000000..f033302c9579
--- /dev/null
+++ b/dev-packages/browser-integration-tests/suites/profiling/traceLifecycleMode_streamed-spans/test.ts
@@ -0,0 +1,36 @@
+import { expect } from '@playwright/test';
+import { sentryTest } from '../../../utils/fixtures';
+import { shouldSkipTracingTest } from '../../../utils/helpers';
+import { waitForStreamedSpans } from '../../../utils/spanUtils';
+
+sentryTest(
+ 'attaches thread.id and thread.name to streamed spans (trace mode)',
+ async ({ page, getLocalTestUrl, browserName }) => {
+ if (shouldSkipTracingTest() || browserName !== 'chromium') {
+ sentryTest.skip();
+ }
+
+ const spansPromise = waitForStreamedSpans(page, receivedSpans => {
+ return receivedSpans.some(s => s.name === 'root-fibonacci');
+ });
+
+ const url = await getLocalTestUrl({ testDir: __dirname, responseHeaders: { 'Document-Policy': 'js-profiling' } });
+ await page.goto(url);
+
+ const spans = await spansPromise;
+
+ const rootSpan = spans.find(s => s.name === 'root-fibonacci');
+ expect(rootSpan).toBeDefined();
+
+ expect(rootSpan!.attributes?.['thread.id']).toEqual({ type: 'string', value: '0' });
+ expect(rootSpan!.attributes?.['thread.name']).toEqual({ type: 'string', value: 'main' });
+
+ const childSpans = spans.filter(s => s.name === 'child-span-1' || s.name === 'child-span-2');
+ expect(childSpans.length).toBeGreaterThanOrEqual(1);
+
+ for (const child of childSpans) {
+ expect(child.attributes?.['thread.id']).toEqual({ type: 'string', value: '0' });
+ expect(child.attributes?.['thread.name']).toEqual({ type: 'string', value: 'main' });
+ }
+ },
+);
diff --git a/dev-packages/bun-integration-tests/package.json b/dev-packages/bun-integration-tests/package.json
index 114c8f7f7d47..75c34603ce71 100644
--- a/dev-packages/bun-integration-tests/package.json
+++ b/dev-packages/bun-integration-tests/package.json
@@ -15,7 +15,7 @@
"dependencies": {
"@sentry/bun": "10.52.0",
"@sentry/hono": "10.52.0",
- "hono": "^4.12.14"
+ "hono": "^4.12.18"
},
"devDependencies": {
"@sentry-internal/test-utils": "10.52.0",
diff --git a/dev-packages/bundler-tests/package.json b/dev-packages/bundler-tests/package.json
index b85893a20a50..dce2d1a4886c 100644
--- a/dev-packages/bundler-tests/package.json
+++ b/dev-packages/bundler-tests/package.json
@@ -14,7 +14,7 @@
"dependencies": {
"@rollup/plugin-node-resolve": "^16.0.3",
"@sentry/browser": "10.52.0",
- "rollup": "^4.0.0",
+ "rollup": "^4.60.3",
"vite": "^5.0.0",
"vitest": "^3.2.4",
"webpack": "^5.0.0"
diff --git a/dev-packages/cloudflare-integration-tests/package.json b/dev-packages/cloudflare-integration-tests/package.json
index 43b2327a3789..15e41ec37f62 100644
--- a/dev-packages/cloudflare-integration-tests/package.json
+++ b/dev-packages/cloudflare-integration-tests/package.json
@@ -16,7 +16,7 @@
"@langchain/langgraph": "^1.0.1",
"@sentry/cloudflare": "10.52.0",
"@sentry/hono": "10.52.0",
- "hono": "^4.12.14"
+ "hono": "^4.12.18"
},
"devDependencies": {
"@cloudflare/workers-types": "^4.20250922.0",
diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/anthropic-ai/index.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/anthropic-ai/index.ts
index 08df5e24b05b..9ff6a9406258 100644
--- a/dev-packages/cloudflare-integration-tests/suites/tracing/anthropic-ai/index.ts
+++ b/dev-packages/cloudflare-integration-tests/suites/tracing/anthropic-ai/index.ts
@@ -16,6 +16,7 @@ export default Sentry.withSentry(
(env: Env) => ({
dsn: env.SENTRY_DSN,
tracesSampleRate: 1.0,
+ streamGenAiSpans: true,
}),
{
async fetch(_request, _env, _ctx) {
diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/anthropic-ai/test.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/anthropic-ai/test.ts
index 17cea5dbf95b..4f60868cddfb 100644
--- a/dev-packages/cloudflare-integration-tests/suites/tracing/anthropic-ai/test.ts
+++ b/dev-packages/cloudflare-integration-tests/suites/tracing/anthropic-ai/test.ts
@@ -1,4 +1,3 @@
-import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
import { expect, it } from 'vitest';
import {
GEN_AI_OPERATION_NAME_ATTRIBUTE,
@@ -21,30 +20,36 @@ it('traces a basic message creation request', async ({ signal }) => {
const runner = createRunner(__dirname)
.ignore('event')
.expect(envelope => {
+ // Transaction item (first item in envelope)
const transactionEvent = envelope[1]?.[0]?.[1] as any;
-
expect(transactionEvent.transaction).toBe('GET /');
- expect(transactionEvent.spans).toEqual(
- expect.arrayContaining([
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
- [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'msg_mock123',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
- }),
- description: 'chat claude-3-haiku-20240307',
- op: 'gen_ai.chat',
- origin: 'auto.ai.anthropic',
- }),
- ]),
- );
+
+ // Span container item (second item in same envelope)
+ const container = envelope[1]?.[1]?.[1] as any;
+ expect(container).toBeDefined();
+
+ expect(container.items).toHaveLength(1);
+ const [firstSpan] = container.items;
+
+ // [0] chat claude-3-haiku-20240307
+ expect(firstSpan!.name).toBe('chat claude-3-haiku-20240307');
+ expect(firstSpan!.status).toBe('ok');
+ expect(firstSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({ type: 'string', value: 'chat' });
+ expect(firstSpan!.attributes['sentry.op']).toEqual({ type: 'string', value: 'gen_ai.chat' });
+ expect(firstSpan!.attributes['sentry.origin']).toEqual({ type: 'string', value: 'auto.ai.anthropic' });
+ expect(firstSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({ type: 'string', value: 'anthropic' });
+ expect(firstSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'claude-3-haiku-20240307',
+ });
+ expect(firstSpan!.attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]).toEqual({ type: 'double', value: 0.7 });
+ expect(firstSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'claude-3-haiku-20240307',
+ });
+ expect(firstSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]).toEqual({ type: 'string', value: 'msg_mock123' });
+ expect(firstSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({ type: 'integer', value: 10 });
+ expect(firstSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]).toEqual({ type: 'integer', value: 15 });
})
.start(signal);
await runner.makeRequest('get', '/');
diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/index.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/index.ts
index 88328768c6f2..ca6138ce17ef 100644
--- a/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/index.ts
+++ b/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/index.ts
@@ -16,6 +16,7 @@ export default Sentry.withSentry(
(env: Env) => ({
dsn: env.SENTRY_DSN,
tracesSampleRate: 1.0,
+ streamGenAiSpans: true,
}),
{
async fetch(_request, _env, _ctx) {
diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/test.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/test.ts
index 5194e3d3a581..2724c8017c7c 100644
--- a/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/test.ts
+++ b/dev-packages/cloudflare-integration-tests/suites/tracing/google-genai/test.ts
@@ -1,4 +1,3 @@
-import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
import { expect, it } from 'vitest';
import {
GEN_AI_OPERATION_NAME_ATTRIBUTE,
@@ -18,67 +17,100 @@ import { createRunner } from '../../../runner';
// want to test that the instrumentation does not break in our
// cloudflare SDK.
-it('traces Google GenAI chat creation and message sending', async () => {
+it('traces Google GenAI chat creation and message sending', async ({ signal }) => {
const runner = createRunner(__dirname)
.ignore('event')
.expect(envelope => {
+ // Transaction item (first item in envelope)
const transactionEvent = envelope[1]?.[0]?.[1] as any;
-
expect(transactionEvent.transaction).toBe('GET /');
- expect(transactionEvent.spans).toEqual(
- expect.arrayContaining([
- // chat.sendMessage
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
- }),
- description: 'chat gemini-1.5-pro',
- op: 'gen_ai.chat',
- origin: 'auto.ai.google_genai',
- }),
- // models.generateContent
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash',
- [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
- [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9,
- [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
- }),
- description: 'generate_content gemini-1.5-flash',
- op: 'gen_ai.generate_content',
- origin: 'auto.ai.google_genai',
- }),
- // models.embedContent
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'text-embedding-004',
- }),
- description: 'embeddings text-embedding-004',
- op: 'gen_ai.embeddings',
- origin: 'auto.ai.google_genai',
- }),
- ]),
- );
+
+ // Span container item (second item in same envelope)
+ const container = envelope[1]?.[1]?.[1] as any;
+ expect(container).toBeDefined();
+ expect(container.items).toHaveLength(3);
+ expect(container.items.map(span => span.name).sort()).toEqual([
+ 'chat gemini-1.5-pro',
+ 'embeddings text-embedding-004',
+ 'generate_content gemini-1.5-flash',
+ ]);
+
+ const chatSpan = container.items.find(span => span.name === 'chat gemini-1.5-pro');
+ expect(chatSpan).toBeDefined();
+ expect(chatSpan!.status).toBe('ok');
+ expect(chatSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({ type: 'string', value: 'chat' });
+ expect(chatSpan!.attributes['sentry.op']).toEqual({ type: 'string', value: 'gen_ai.chat' });
+ expect(chatSpan!.attributes['sentry.origin']).toEqual({ type: 'string', value: 'auto.ai.google_genai' });
+ expect(chatSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({ type: 'string', value: 'google_genai' });
+ expect(chatSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gemini-1.5-pro',
+ });
+ expect(chatSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({ type: 'integer', value: 8 });
+ expect(chatSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]).toEqual({ type: 'integer', value: 12 });
+ expect(chatSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({ type: 'integer', value: 20 });
+
+ const generateContentSpan = container.items.find(span => span.name === 'generate_content gemini-1.5-flash');
+ expect(generateContentSpan).toBeDefined();
+ expect(generateContentSpan!.status).toBe('ok');
+ expect(generateContentSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'generate_content',
+ });
+ expect(generateContentSpan!.attributes['sentry.op']).toEqual({
+ type: 'string',
+ value: 'gen_ai.generate_content',
+ });
+ expect(generateContentSpan!.attributes['sentry.origin']).toEqual({
+ type: 'string',
+ value: 'auto.ai.google_genai',
+ });
+ expect(generateContentSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'google_genai',
+ });
+ expect(generateContentSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gemini-1.5-flash',
+ });
+ expect(generateContentSpan!.attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]).toEqual({
+ type: 'double',
+ value: 0.7,
+ });
+ expect(generateContentSpan!.attributes[GEN_AI_REQUEST_TOP_P_ATTRIBUTE]).toEqual({ type: 'double', value: 0.9 });
+ expect(generateContentSpan!.attributes[GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 100,
+ });
+ expect(generateContentSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 8,
+ });
+ expect(generateContentSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 12,
+ });
+ expect(generateContentSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 20,
+ });
+
+ const embeddingsSpan = container.items.find(span => span.name === 'embeddings text-embedding-004');
+ expect(embeddingsSpan).toBeDefined();
+ expect(embeddingsSpan!.status).toBe('ok');
+ expect(embeddingsSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'embeddings',
+ });
+ expect(embeddingsSpan!.attributes['sentry.op']).toEqual({ type: 'string', value: 'gen_ai.embeddings' });
+ expect(embeddingsSpan!.attributes['sentry.origin']).toEqual({ type: 'string', value: 'auto.ai.google_genai' });
+ expect(embeddingsSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({ type: 'string', value: 'google_genai' });
+ expect(embeddingsSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'text-embedding-004',
+ });
})
- .start();
+ .start(signal);
await runner.makeRequest('get', '/');
await runner.completed();
});
diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/index.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/index.ts
index 0d59fd91c2b7..b1cd17c5eda1 100644
--- a/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/index.ts
+++ b/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/index.ts
@@ -9,6 +9,7 @@ export default Sentry.withSentry(
(env: Env) => ({
dsn: env.SENTRY_DSN,
tracesSampleRate: 1.0,
+ streamGenAiSpans: true,
}),
{
async fetch(_request, _env, _ctx) {
diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/test.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/test.ts
index d4abc4ae7220..6968d57bfef9 100644
--- a/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/test.ts
+++ b/dev-packages/cloudflare-integration-tests/suites/tracing/langchain/test.ts
@@ -1,4 +1,3 @@
-import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
import { expect, it } from 'vitest';
import {
GEN_AI_OPERATION_NAME_ATTRIBUTE,
@@ -22,53 +21,50 @@ it('traces langchain chat model, chain, and tool invocations', async ({ signal }
const runner = createRunner(__dirname)
.ignore('event')
.expect(envelope => {
+ // Transaction item (first item in envelope)
const transactionEvent = envelope[1]?.[0]?.[1] as any;
-
expect(transactionEvent.transaction).toBe('GET /');
- expect(transactionEvent.spans).toEqual(
- expect.arrayContaining([
- // Chat model span
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022',
- [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
- [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
- }),
- description: 'chat claude-3-5-sonnet-20241022',
- op: 'gen_ai.chat',
- origin: 'auto.ai.langchain',
- }),
- // Chain span
- expect.objectContaining({
- data: expect.objectContaining({
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
- 'langchain.chain.name': 'my_test_chain',
- }),
- description: 'chain my_test_chain',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.ai.langchain',
- }),
- // Tool span
- expect.objectContaining({
- data: expect.objectContaining({
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
- [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'search_tool',
- }),
- description: 'execute_tool search_tool',
- op: 'gen_ai.execute_tool',
- origin: 'auto.ai.langchain',
- }),
- ]),
- );
+
+ // Span container item (second item in same envelope)
+ const container = envelope[1]?.[1]?.[1] as any;
+ expect(container).toBeDefined();
+ expect(container.items).toHaveLength(3);
+ expect(container.items.map(span => span.name).sort()).toEqual([
+ 'chain my_test_chain',
+ 'chat claude-3-5-sonnet-20241022',
+ 'execute_tool search_tool',
+ ]);
+
+ const chatSpan = container.items.find(span => span.name === 'chat claude-3-5-sonnet-20241022');
+ expect(chatSpan).toBeDefined();
+ expect(chatSpan!.status).toBe('ok');
+ expect(chatSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({ type: 'string', value: 'chat' });
+ expect(chatSpan!.attributes['sentry.op']).toEqual({ type: 'string', value: 'gen_ai.chat' });
+ expect(chatSpan!.attributes['sentry.origin']).toEqual({ type: 'string', value: 'auto.ai.langchain' });
+ expect(chatSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({ type: 'string', value: 'anthropic' });
+ expect(chatSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'claude-3-5-sonnet-20241022',
+ });
+ expect(chatSpan!.attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]).toEqual({ type: 'double', value: 0.7 });
+ expect(chatSpan!.attributes[GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]).toEqual({ type: 'integer', value: 100 });
+ expect(chatSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({ type: 'integer', value: 10 });
+ expect(chatSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]).toEqual({ type: 'integer', value: 15 });
+ expect(chatSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({ type: 'integer', value: 25 });
+
+ const chainSpan = container.items.find(span => span.name === 'chain my_test_chain');
+ expect(chainSpan).toBeDefined();
+ expect(chainSpan!.status).toBe('ok');
+ expect(chainSpan!.attributes['sentry.origin']).toEqual({ type: 'string', value: 'auto.ai.langchain' });
+ expect(chainSpan!.attributes['sentry.op']).toEqual({ type: 'string', value: 'gen_ai.invoke_agent' });
+ expect(chainSpan!.attributes['langchain.chain.name']).toEqual({ type: 'string', value: 'my_test_chain' });
+
+ const toolSpan = container.items.find(span => span.name === 'execute_tool search_tool');
+ expect(toolSpan).toBeDefined();
+ expect(toolSpan!.status).toBe('ok');
+ expect(toolSpan!.attributes['sentry.origin']).toEqual({ type: 'string', value: 'auto.ai.langchain' });
+ expect(toolSpan!.attributes['sentry.op']).toEqual({ type: 'string', value: 'gen_ai.execute_tool' });
+ expect(toolSpan!.attributes[GEN_AI_TOOL_NAME_ATTRIBUTE]).toEqual({ type: 'string', value: 'search_tool' });
})
.start(signal);
await runner.makeRequest('get', '/');
diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/langgraph/index.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/langgraph/index.ts
index 6837a14be111..c18ed4088e69 100644
--- a/dev-packages/cloudflare-integration-tests/suites/tracing/langgraph/index.ts
+++ b/dev-packages/cloudflare-integration-tests/suites/tracing/langgraph/index.ts
@@ -10,6 +10,7 @@ export default Sentry.withSentry(
dsn: env.SENTRY_DSN,
tracesSampleRate: 1.0,
sendDefaultPii: true,
+ streamGenAiSpans: true,
}),
{
async fetch(_request, _env, _ctx) {
diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/langgraph/test.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/langgraph/test.ts
index 6efa07164df5..542f29ad7206 100644
--- a/dev-packages/cloudflare-integration-tests/suites/tracing/langgraph/test.ts
+++ b/dev-packages/cloudflare-integration-tests/suites/tracing/langgraph/test.ts
@@ -1,11 +1,9 @@
-import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
import { expect, it } from 'vitest';
import {
GEN_AI_AGENT_NAME_ATTRIBUTE,
GEN_AI_INPUT_MESSAGES_ATTRIBUTE,
GEN_AI_OPERATION_NAME_ATTRIBUTE,
GEN_AI_PIPELINE_NAME_ATTRIBUTE,
- GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE,
GEN_AI_RESPONSE_MODEL_ATTRIBUTE,
GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE,
GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE,
@@ -22,48 +20,71 @@ it('traces langgraph compile and invoke operations', async ({ signal }) => {
const runner = createRunner(__dirname)
.ignore('event')
.expect(envelope => {
+ // Transaction item (first item in envelope)
const transactionEvent = envelope[1]?.[0]?.[1] as any;
-
expect(transactionEvent.transaction).toBe('GET /');
- // Check create_agent span
- const createAgentSpan = transactionEvent.spans.find((span: any) => span.op === 'gen_ai.create_agent');
- expect(createAgentSpan).toMatchObject({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'create_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.create_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph',
- [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant',
- },
- description: 'create_agent weather_assistant',
- op: 'gen_ai.create_agent',
- origin: 'auto.ai.langgraph',
- });
+ // Span container item (second item in same envelope)
+ const container = envelope[1]?.[1]?.[1] as any;
+ expect(container).toBeDefined();
+
+ expect(container.items).toHaveLength(2);
+ expect(container.items.map(span => span.name).sort()).toEqual([
+ 'create_agent weather_assistant',
+ 'invoke_agent weather_assistant',
+ ]);
- // Check invoke_agent span
- const invokeAgentSpan = transactionEvent.spans.find((span: any) => span.op === 'gen_ai.invoke_agent');
- expect(invokeAgentSpan).toMatchObject({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph',
- [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant',
- [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'weather_assistant',
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in SF?"}]',
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 20,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
- }),
- description: 'invoke_agent weather_assistant',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.ai.langgraph',
+ const createAgentSpan = container.items.find(span => span.name === 'create_agent weather_assistant');
+ expect(createAgentSpan).toBeDefined();
+ expect(createAgentSpan!.status).toBe('ok');
+ expect(createAgentSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'create_agent',
+ });
+ expect(createAgentSpan!.attributes['sentry.op']).toEqual({ type: 'string', value: 'gen_ai.create_agent' });
+ expect(createAgentSpan!.attributes['sentry.origin']).toEqual({ type: 'string', value: 'auto.ai.langgraph' });
+ expect(createAgentSpan!.attributes[GEN_AI_AGENT_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'weather_assistant',
});
- // Verify tools are captured
- if (invokeAgentSpan.data[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]) {
- expect(invokeAgentSpan.data[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]).toMatch(/get_weather/);
- }
+ const invokeAgentSpan = container.items.find(span => span.name === 'invoke_agent weather_assistant');
+ expect(invokeAgentSpan).toBeDefined();
+ expect(invokeAgentSpan!.status).toBe('ok');
+ expect(invokeAgentSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'invoke_agent',
+ });
+ expect(invokeAgentSpan!.attributes['sentry.op']).toEqual({ type: 'string', value: 'gen_ai.invoke_agent' });
+ expect(invokeAgentSpan!.attributes['sentry.origin']).toEqual({ type: 'string', value: 'auto.ai.langgraph' });
+ expect(invokeAgentSpan!.attributes[GEN_AI_AGENT_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'weather_assistant',
+ });
+ expect(invokeAgentSpan!.attributes[GEN_AI_PIPELINE_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'weather_assistant',
+ });
+ expect(invokeAgentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '[{"role":"user","content":"What is the weather in SF?"}]',
+ });
+ expect(invokeAgentSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'mock-model',
+ });
+ expect(invokeAgentSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 20,
+ });
+ expect(invokeAgentSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 10,
+ });
+ expect(invokeAgentSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 30,
+ });
})
.start(signal);
await runner.makeRequest('get', '/');
diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/openai/index.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/openai/index.ts
index 8b21b479ce80..0a50b60c43a6 100644
--- a/dev-packages/cloudflare-integration-tests/suites/tracing/openai/index.ts
+++ b/dev-packages/cloudflare-integration-tests/suites/tracing/openai/index.ts
@@ -15,6 +15,7 @@ export default Sentry.withSentry(
(env: Env) => ({
dsn: env.SENTRY_DSN,
tracesSampleRate: 1.0,
+ streamGenAiSpans: true,
}),
{
async fetch(_request, _env, _ctx) {
diff --git a/dev-packages/cloudflare-integration-tests/suites/tracing/openai/test.ts b/dev-packages/cloudflare-integration-tests/suites/tracing/openai/test.ts
index 1c057e1a986c..76288214e9f2 100644
--- a/dev-packages/cloudflare-integration-tests/suites/tracing/openai/test.ts
+++ b/dev-packages/cloudflare-integration-tests/suites/tracing/openai/test.ts
@@ -1,4 +1,3 @@
-import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
import { expect, it } from 'vitest';
import {
GEN_AI_OPERATION_NAME_ATTRIBUTE,
@@ -23,32 +22,41 @@ it('traces a basic chat completion request', async ({ signal }) => {
const runner = createRunner(__dirname)
.ignore('event')
.expect(envelope => {
- const transactionEvent = envelope[1]?.[0]?.[1];
-
+ // Transaction item (first item in envelope)
+ const transactionEvent = envelope[1]?.[0]?.[1] as any;
expect(transactionEvent.transaction).toBe('GET /');
- expect(transactionEvent.spans).toEqual(
- expect.arrayContaining([
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
- [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]',
- }),
- description: 'chat gpt-3.5-turbo',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- }),
- ]),
- );
+
+ // Span container item (second item in same envelope)
+ const container = envelope[1]?.[1]?.[1] as any;
+ expect(container).toBeDefined();
+
+ expect(container.items).toHaveLength(1);
+ const [firstSpan] = container.items;
+
+ // [0] chat gpt-3.5-turbo
+ expect(firstSpan!.name).toBe('chat gpt-3.5-turbo');
+ expect(firstSpan!.status).toBe('ok');
+ expect(firstSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({ type: 'string', value: 'chat' });
+ expect(firstSpan!.attributes['sentry.op']).toEqual({ type: 'string', value: 'gen_ai.chat' });
+ expect(firstSpan!.attributes['sentry.origin']).toEqual({ type: 'string', value: 'auto.ai.openai' });
+ expect(firstSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({ type: 'string', value: 'openai' });
+ expect(firstSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({ type: 'string', value: 'gpt-3.5-turbo' });
+ expect(firstSpan!.attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]).toEqual({ type: 'double', value: 0.7 });
+ expect(firstSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-3.5-turbo',
+ });
+ expect(firstSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chatcmpl-mock123',
+ });
+ expect(firstSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({ type: 'integer', value: 10 });
+ expect(firstSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]).toEqual({ type: 'integer', value: 15 });
+ expect(firstSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({ type: 'integer', value: 25 });
+ expect(firstSpan!.attributes[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '["stop"]',
+ });
})
.start(signal);
await runner.makeRequest('get', '/');
diff --git a/dev-packages/e2e-tests/test-applications/browser-webworker-vite/package.json b/dev-packages/e2e-tests/test-applications/browser-webworker-vite/package.json
index 71cae14a0120..22489d121695 100644
--- a/dev-packages/e2e-tests/test-applications/browser-webworker-vite/package.json
+++ b/dev-packages/e2e-tests/test-applications/browser-webworker-vite/package.json
@@ -19,7 +19,7 @@
},
"dependencies": {
"@sentry/browser": "file:../../packed/sentry-browser-packed.tgz",
- "@sentry/vite-plugin": "^5.2.0"
+ "@sentry/vite-plugin": "^5.3.0"
},
"volta": {
"node": "20.19.2",
diff --git a/dev-packages/e2e-tests/test-applications/cloudflare-hono/package.json b/dev-packages/e2e-tests/test-applications/cloudflare-hono/package.json
index 5c4cbe43a5d0..4000aa78e5a1 100644
--- a/dev-packages/e2e-tests/test-applications/cloudflare-hono/package.json
+++ b/dev-packages/e2e-tests/test-applications/cloudflare-hono/package.json
@@ -12,7 +12,7 @@
},
"dependencies": {
"@sentry/cloudflare": "file:../../packed/sentry-cloudflare-packed.tgz",
- "hono": "4.12.14"
+ "hono": "4.12.18"
},
"devDependencies": {
"@cloudflare/vitest-pool-workers": "^0.8.31",
diff --git a/dev-packages/e2e-tests/test-applications/cloudflare-hono/src/index.ts b/dev-packages/e2e-tests/test-applications/cloudflare-hono/src/index.ts
index 7cd667c72408..ceba3494d53f 100644
--- a/dev-packages/e2e-tests/test-applications/cloudflare-hono/src/index.ts
+++ b/dev-packages/e2e-tests/test-applications/cloudflare-hono/src/index.ts
@@ -17,7 +17,7 @@ app.get('/error', () => {
});
app.onError((err, ctx) => {
- console.error(`Error occured: ${err.message}`);
+ console.error(`Error occurred: ${err.message}`);
return ctx.json({ error: err.message }, 500);
});
diff --git a/dev-packages/e2e-tests/test-applications/debug-id-sourcemaps/package.json b/dev-packages/e2e-tests/test-applications/debug-id-sourcemaps/package.json
index d7599f0e332d..29e816de1116 100644
--- a/dev-packages/e2e-tests/test-applications/debug-id-sourcemaps/package.json
+++ b/dev-packages/e2e-tests/test-applications/debug-id-sourcemaps/package.json
@@ -15,7 +15,7 @@
"devDependencies": {
"rollup": "^4.35.0",
"vitest": "^0.34.6",
- "@sentry/rollup-plugin": "^5.2.0"
+ "@sentry/rollup-plugin": "^5.3.0"
},
"volta": {
"extends": "../../package.json"
diff --git a/dev-packages/e2e-tests/test-applications/hono-4/src/route-groups/test-middleware.ts b/dev-packages/e2e-tests/test-applications/hono-4/src/route-groups/test-middleware.ts
index 49ca50c591bf..d82201b7cdb3 100644
--- a/dev-packages/e2e-tests/test-applications/hono-4/src/route-groups/test-middleware.ts
+++ b/dev-packages/e2e-tests/test-applications/hono-4/src/route-groups/test-middleware.ts
@@ -7,6 +7,7 @@ middlewareRoutes.get('/named', c => c.json({ middleware: 'named' }));
middlewareRoutes.get('/anonymous', c => c.json({ middleware: 'anonymous' }));
middlewareRoutes.get('/multi', c => c.json({ middleware: 'multi' }));
middlewareRoutes.get('/error', c => c.text('should not reach'));
+middlewareRoutes.get('/param/:id', c => c.json({ paramId: c.req.param('id') }));
// Self-contained sub-app registering its own middleware via .use()
const subAppWithMiddleware = new Hono();
@@ -18,6 +19,7 @@ subAppWithMiddleware.use('/anonymous/*', async (c, next) => {
});
subAppWithMiddleware.use('/multi/*', middlewareA, middlewareB);
subAppWithMiddleware.use('/error/*', failingMiddleware);
+subAppWithMiddleware.use('/param/*', middlewareA);
// .all() handler (1 parameter) — should NOT be wrapped as middleware by patchRoute.
subAppWithMiddleware.all('/all-handler', async function allCatchAll(c) {
diff --git a/dev-packages/e2e-tests/test-applications/hono-4/src/routes.ts b/dev-packages/e2e-tests/test-applications/hono-4/src/routes.ts
index cfb13146b6f7..f1f9c3a783b3 100644
--- a/dev-packages/e2e-tests/test-applications/hono-4/src/routes.ts
+++ b/dev-packages/e2e-tests/test-applications/hono-4/src/routes.ts
@@ -34,6 +34,7 @@ export function addRoutes(app: Hono<{ Bindings?: { E2E_TEST_DSN: string } }>): v
});
app.use('/test-middleware/multi/*', middlewareA, middlewareB);
app.use('/test-middleware/error/*', failingMiddleware);
+ app.use('/test-middleware/param/*', middlewareA);
app.route('/test-middleware', middlewareRoutes);
// Sub-app middleware: registered on the sub-app, wrapped at mount time by route() patching
diff --git a/dev-packages/e2e-tests/test-applications/hono-4/tests/errors.test.ts b/dev-packages/e2e-tests/test-applications/hono-4/tests/errors.test.ts
index 98c81d30afeb..8ee1f13f0c0a 100644
--- a/dev-packages/e2e-tests/test-applications/hono-4/tests/errors.test.ts
+++ b/dev-packages/e2e-tests/test-applications/hono-4/tests/errors.test.ts
@@ -147,6 +147,7 @@ test.describe('middleware errors', () => {
expect(errorEvent.exception?.values?.[0]?.value).toBe('Service Unavailable from middleware');
expect(errorEvent.exception?.values?.[0]?.mechanism?.type).toBe('auto.middleware.hono');
expect(errorEvent.exception?.values?.[0]?.mechanism?.handled).toBe(false);
+ expect(errorEvent.transaction).toBe('GET /test-errors/middleware-http-exception');
const transaction = await transactionPromise;
const middlewareSpan = (transaction.spans || []).find(s => s.op === 'middleware.hono');
@@ -183,7 +184,7 @@ test.describe('middleware errors', () => {
const transaction = await transactionPromise;
if (RUNTIME === 'cloudflare') {
- expect(transaction.transaction).toBe('GET /test-errors/middleware-http-exception-4xx/*');
+ expect(transaction.transaction).toBe('GET /test-errors/middleware-http-exception-4xx');
const middlewareSpan = (transaction.spans || []).find(s => s.op === 'middleware.hono');
expect(middlewareSpan?.status).not.toBe('internal_error');
diff --git a/dev-packages/e2e-tests/test-applications/hono-4/tests/middleware.test.ts b/dev-packages/e2e-tests/test-applications/hono-4/tests/middleware.test.ts
index d984ac0d38a8..c4bf34874f2b 100644
--- a/dev-packages/e2e-tests/test-applications/hono-4/tests/middleware.test.ts
+++ b/dev-packages/e2e-tests/test-applications/hono-4/tests/middleware.test.ts
@@ -116,6 +116,9 @@ for (const { name, prefix } of SCENARIOS) {
type: 'auto.middleware.hono',
}),
);
+
+ // The transaction name on the error event determines the culprit shown in Sentry.
+ expect(errorEvent.transaction).toBe(`GET ${prefix}/error`);
});
test('sets error status on middleware span when middleware throws', async ({ baseURL }) => {
@@ -126,7 +129,7 @@ for (const { name, prefix } of SCENARIOS) {
await fetch(`${baseURL}${prefix}/error`);
const transaction = await transactionPromise;
- expect(transaction.transaction).toBe(`GET ${prefix}/error/*`);
+ expect(transaction.transaction).toBe(`GET ${prefix}/error`);
const spans = transaction.spans || [];
@@ -138,6 +141,25 @@ for (const { name, prefix } of SCENARIOS) {
expect(failingSpan?.status).toBe('internal_error');
});
+ test('uses parameterized route in transaction name', async ({ baseURL }) => {
+ const transactionPromise = waitForTransaction(APP_NAME, event => {
+ return event.contexts?.trace?.op === 'http.server' && !!event.transaction?.includes(`${prefix}/param/`);
+ });
+
+ const response = await fetch(`${baseURL}${prefix}/param/42`);
+ expect(response.status).toBe(200);
+
+ const transaction = await transactionPromise;
+ expect(transaction.transaction).toBe(`GET ${prefix}/param/:id`);
+
+ const spans = transaction.spans || [];
+ const middlewareSpan = spans.find(
+ (span: { description?: string; op?: string }) =>
+ span.op === 'middleware.hono' && span.description === 'middlewareA',
+ );
+ expect(middlewareSpan).toBeDefined();
+ });
+
test('includes request data on error events from middleware', async ({ baseURL }) => {
const errorPromise = waitForError(APP_NAME, event => {
return event.exception?.values?.[0]?.value === 'Middleware error' && !!event.request?.url?.includes(prefix);
diff --git a/dev-packages/e2e-tests/test-applications/hydrogen-react-router-7/package.json b/dev-packages/e2e-tests/test-applications/hydrogen-react-router-7/package.json
index 56789ec7cedb..d8fed819c425 100644
--- a/dev-packages/e2e-tests/test-applications/hydrogen-react-router-7/package.json
+++ b/dev-packages/e2e-tests/test-applications/hydrogen-react-router-7/package.json
@@ -16,7 +16,7 @@
"dependencies": {
"@sentry/cloudflare": "file:../../packed/sentry-cloudflare-packed.tgz",
"@sentry/react-router": "file:../../packed/sentry-react-router-packed.tgz",
- "@sentry/vite-plugin": "^5.2.0",
+ "@sentry/vite-plugin": "^5.3.0",
"@shopify/hydrogen": "2025.5.0",
"@shopify/remix-oxygen": "^3.0.0",
"graphql": "^16.10.0",
diff --git a/dev-packages/e2e-tests/test-applications/nextjs-15/package.json b/dev-packages/e2e-tests/test-applications/nextjs-15/package.json
index acbe56d0b5f1..25fc16f8702e 100644
--- a/dev-packages/e2e-tests/test-applications/nextjs-15/package.json
+++ b/dev-packages/e2e-tests/test-applications/nextjs-15/package.json
@@ -20,7 +20,7 @@
"@types/react": "18.0.26",
"@types/react-dom": "18.0.9",
"ai": "^3.0.0",
- "next": "15.5.15",
+ "next": "15.5.18",
"react": "latest",
"react-dom": "latest",
"typescript": "~5.0.0",
diff --git a/dev-packages/e2e-tests/test-applications/nextjs-15/sentry.server.config.ts b/dev-packages/e2e-tests/test-applications/nextjs-15/sentry.server.config.ts
index 270c7244ad17..e5306a66e579 100644
--- a/dev-packages/e2e-tests/test-applications/nextjs-15/sentry.server.config.ts
+++ b/dev-packages/e2e-tests/test-applications/nextjs-15/sentry.server.config.ts
@@ -11,4 +11,5 @@ Sentry.init({
bufferSize: 1000,
},
integrations: [Sentry.vercelAIIntegration()],
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/e2e-tests/test-applications/nextjs-15/tests/ai-error.test.ts b/dev-packages/e2e-tests/test-applications/nextjs-15/tests/ai-error.test.ts
index a8c39ec032ec..81bf9d04ba97 100644
--- a/dev-packages/e2e-tests/test-applications/nextjs-15/tests/ai-error.test.ts
+++ b/dev-packages/e2e-tests/test-applications/nextjs-15/tests/ai-error.test.ts
@@ -1,11 +1,16 @@
import { expect, test } from '@playwright/test';
-import { waitForTransaction, waitForError } from '@sentry-internal/test-utils';
+import { getSpanOp, waitForError, waitForStreamedSpans, waitForTransaction } from '@sentry-internal/test-utils';
test('should create AI spans with correct attributes and error linking', async ({ page }) => {
const aiTransactionPromise = waitForTransaction('nextjs-15', async transactionEvent => {
return transactionEvent.transaction === 'GET /ai-error-test';
});
+ // gen_ai spans are extracted into a separate span v2 envelope item
+ const genAiSpansPromise = waitForStreamedSpans('nextjs-15', spans =>
+ spans.some(span => getSpanOp(span) === 'gen_ai.invoke_agent'),
+ );
+
const errorEventPromise = waitForError('nextjs-15', async errorEvent => {
return errorEvent.exception?.values?.[0]?.value?.includes('Tool call failed');
});
@@ -13,21 +18,20 @@ test('should create AI spans with correct attributes and error linking', async (
await page.goto('/ai-error-test');
const aiTransaction = await aiTransactionPromise;
+ const genAiSpans = await genAiSpansPromise;
const errorEvent = await errorEventPromise;
expect(aiTransaction).toBeDefined();
expect(aiTransaction.transaction).toBe('GET /ai-error-test');
- const spans = aiTransaction.spans || [];
-
// Each generateText call should create 2 spans: one for the pipeline and one for doGenerate
// Plus a span for the tool call
// TODO: For now, this is sadly not fully working - the monkey patching of the ai package is not working
// because of this, only spans that are manually opted-in at call time will be captured
// this may be fixed by https://github.com/vercel/ai/pull/6716 in the future
- const aiPipelineSpans = spans.filter(span => span.op === 'gen_ai.invoke_agent');
- const aiGenerateSpans = spans.filter(span => span.op === 'gen_ai.generate_content');
- const toolCallSpans = spans.filter(span => span.op === 'gen_ai.execute_tool');
+ const aiPipelineSpans = genAiSpans.filter(span => getSpanOp(span) === 'gen_ai.invoke_agent');
+ const aiGenerateSpans = genAiSpans.filter(span => getSpanOp(span) === 'gen_ai.generate_content');
+ const toolCallSpans = genAiSpans.filter(span => getSpanOp(span) === 'gen_ai.execute_tool');
expect(aiPipelineSpans.length).toBeGreaterThanOrEqual(1);
expect(aiGenerateSpans.length).toBeGreaterThanOrEqual(1);
diff --git a/dev-packages/e2e-tests/test-applications/nextjs-15/tests/ai-test.test.ts b/dev-packages/e2e-tests/test-applications/nextjs-15/tests/ai-test.test.ts
index 42c21e4f8c80..b76ea6eb9ff9 100644
--- a/dev-packages/e2e-tests/test-applications/nextjs-15/tests/ai-test.test.ts
+++ b/dev-packages/e2e-tests/test-applications/nextjs-15/tests/ai-test.test.ts
@@ -1,29 +1,33 @@
import { expect, test } from '@playwright/test';
-import { waitForTransaction } from '@sentry-internal/test-utils';
+import { getSpanOp, waitForStreamedSpans, waitForTransaction } from '@sentry-internal/test-utils';
test('should create AI spans with correct attributes', async ({ page }) => {
const aiTransactionPromise = waitForTransaction('nextjs-15', async transactionEvent => {
return transactionEvent.transaction === 'GET /ai-test';
});
+ // gen_ai spans are extracted into a separate span v2 envelope item
+ const genAiSpansPromise = waitForStreamedSpans('nextjs-15', spans =>
+ spans.some(span => getSpanOp(span) === 'gen_ai.invoke_agent'),
+ );
+
await page.goto('/ai-test');
const aiTransaction = await aiTransactionPromise;
+ const genAiSpans = await genAiSpansPromise;
expect(aiTransaction).toBeDefined();
expect(aiTransaction.transaction).toBe('GET /ai-test');
- const spans = aiTransaction.spans || [];
-
// We expect spans for the first 3 AI calls (4th is disabled)
// Each generateText call should create 2 spans: one for the pipeline and one for doGenerate
// Plus a span for the tool call
// TODO: For now, this is sadly not fully working - the monkey patching of the ai package is not working
// because of this, only spans that are manually opted-in at call time will be captured
// this may be fixed by https://github.com/vercel/ai/pull/6716 in the future
- const aiPipelineSpans = spans.filter(span => span.op === 'gen_ai.invoke_agent');
- const aiGenerateSpans = spans.filter(span => span.op === 'gen_ai.generate_content');
- const toolCallSpans = spans.filter(span => span.op === 'gen_ai.execute_tool');
+ const aiPipelineSpans = genAiSpans.filter(span => getSpanOp(span) === 'gen_ai.invoke_agent');
+ const aiGenerateSpans = genAiSpans.filter(span => getSpanOp(span) === 'gen_ai.generate_content');
+ const toolCallSpans = genAiSpans.filter(span => getSpanOp(span) === 'gen_ai.execute_tool');
expect(aiPipelineSpans.length).toBeGreaterThanOrEqual(1);
expect(aiGenerateSpans.length).toBeGreaterThanOrEqual(1);
@@ -31,35 +35,35 @@ test('should create AI spans with correct attributes', async ({ page }) => {
// First AI call - should have telemetry enabled and record inputs/outputs (sendDefaultPii: true)
/* const firstPipelineSpan = aiPipelineSpans[0];
- expect(firstPipelineSpan?.data?.['vercel.ai.model.id']).toBe('mock-model-id');
- expect(firstPipelineSpan?.data?.['vercel.ai.model.provider']).toBe('mock-provider');
- expect(firstPipelineSpan?.data?.['vercel.ai.prompt']).toContain('Where is the first span?');
- expect(firstPipelineSpan?.data?.['gen_ai.output.messages']).toContain('First span here!');
- expect(firstPipelineSpan?.data?.['gen_ai.usage.input_tokens']).toBe(10);
- expect(firstPipelineSpan?.data?.['gen_ai.usage.output_tokens']).toBe(20); */
+ expect(firstPipelineSpan?.attributes?.['vercel.ai.model.id']?.value).toBe('mock-model-id');
+ expect(firstPipelineSpan?.attributes?.['vercel.ai.model.provider']?.value).toBe('mock-provider');
+ expect(firstPipelineSpan?.attributes?.['vercel.ai.prompt']?.value).toContain('Where is the first span?');
+ expect(firstPipelineSpan?.attributes?.['gen_ai.output.messages']?.value).toContain('First span here!');
+ expect(firstPipelineSpan?.attributes?.['gen_ai.usage.input_tokens']?.value).toBe(10);
+ expect(firstPipelineSpan?.attributes?.['gen_ai.usage.output_tokens']?.value).toBe(20); */
// Second AI call - explicitly enabled telemetry
const secondPipelineSpan = aiPipelineSpans[0];
- expect(secondPipelineSpan?.data?.['vercel.ai.prompt']).toContain('Where is the second span?');
- expect(secondPipelineSpan?.data?.['gen_ai.output.messages']).toContain('Second span here!');
+ expect(secondPipelineSpan?.attributes?.['vercel.ai.prompt']?.value).toContain('Where is the second span?');
+ expect(secondPipelineSpan?.attributes?.['gen_ai.output.messages']?.value).toContain('Second span here!');
// Third AI call - with tool calls
/* const thirdPipelineSpan = aiPipelineSpans[2];
- expect(thirdPipelineSpan?.data?.['vercel.ai.response.finishReason']).toBe('tool-calls');
- expect(thirdPipelineSpan?.data?.['gen_ai.usage.input_tokens']).toBe(15);
- expect(thirdPipelineSpan?.data?.['gen_ai.usage.output_tokens']).toBe(25); */
+ expect(thirdPipelineSpan?.attributes?.['vercel.ai.response.finishReason']?.value).toBe('tool-calls');
+ expect(thirdPipelineSpan?.attributes?.['gen_ai.usage.input_tokens']?.value).toBe(15);
+ expect(thirdPipelineSpan?.attributes?.['gen_ai.usage.output_tokens']?.value).toBe(25); */
// Tool call span
/* const toolSpan = toolCallSpans[0];
- expect(toolSpan?.data?.['vercel.ai.toolCall.name']).toBe('getWeather');
- expect(toolSpan?.data?.['vercel.ai.toolCall.id']).toBe('call-1');
- expect(toolSpan?.data?.['vercel.ai.toolCall.args']).toContain('San Francisco');
- expect(toolSpan?.data?.['vercel.ai.toolCall.result']).toContain('Sunny, 72°F'); */
+ expect(toolSpan?.attributes?.['vercel.ai.toolCall.name']?.value).toBe('getWeather');
+ expect(toolSpan?.attributes?.['vercel.ai.toolCall.id']?.value).toBe('call-1');
+ expect(toolSpan?.attributes?.['vercel.ai.toolCall.args']?.value).toContain('San Francisco');
+ expect(toolSpan?.attributes?.['vercel.ai.toolCall.result']?.value).toContain('Sunny, 72°F'); */
// Verify the fourth call was not captured (telemetry disabled)
- const promptsInSpans = spans
- .map(span => span.data?.['vercel.ai.prompt'])
- .filter((prompt): prompt is string => prompt !== undefined);
+ const promptsInSpans = genAiSpans
+ .map(span => span.attributes?.['vercel.ai.prompt']?.value)
+ .filter((prompt): prompt is string => typeof prompt === 'string');
const hasDisabledPrompt = promptsInSpans.some(prompt => prompt.includes('Where is the third span?'));
expect(hasDisabledPrompt).toBe(false);
diff --git a/dev-packages/e2e-tests/test-applications/nextjs-16-streaming/package.json b/dev-packages/e2e-tests/test-applications/nextjs-16-streaming/package.json
index 8e254f4b4657..4fbc3bf64c27 100644
--- a/dev-packages/e2e-tests/test-applications/nextjs-16-streaming/package.json
+++ b/dev-packages/e2e-tests/test-applications/nextjs-16-streaming/package.json
@@ -19,7 +19,7 @@
"@vercel/queue": "^0.1.3",
"ai": "^3.0.0",
"import-in-the-middle": "^2",
- "next": "16.2.4",
+ "next": "16.2.6",
"react": "19.1.0",
"react-dom": "19.1.0",
"require-in-the-middle": "^8",
diff --git a/dev-packages/e2e-tests/test-applications/nextjs-16/sentry.server.config.ts b/dev-packages/e2e-tests/test-applications/nextjs-16/sentry.server.config.ts
index d7015bce4a30..8428c75580ba 100644
--- a/dev-packages/e2e-tests/test-applications/nextjs-16/sentry.server.config.ts
+++ b/dev-packages/e2e-tests/test-applications/nextjs-16/sentry.server.config.ts
@@ -9,6 +9,7 @@ Sentry.init({
sendDefaultPii: true,
// debug: true,
integrations: [Sentry.vercelAIIntegration(), Sentry.nodeRuntimeMetricsIntegration({ collectionIntervalMs: 1_000 })],
+ streamGenAiSpans: true,
// Verify Log type is available
beforeSendLog(log: Log) {
return log;
diff --git a/dev-packages/e2e-tests/test-applications/nextjs-16/tests/ai-error.test.ts b/dev-packages/e2e-tests/test-applications/nextjs-16/tests/ai-error.test.ts
index 39e76bab0dde..62e6798773bd 100644
--- a/dev-packages/e2e-tests/test-applications/nextjs-16/tests/ai-error.test.ts
+++ b/dev-packages/e2e-tests/test-applications/nextjs-16/tests/ai-error.test.ts
@@ -1,11 +1,16 @@
import { expect, test } from '@playwright/test';
-import { waitForTransaction, waitForError } from '@sentry-internal/test-utils';
+import { getSpanOp, waitForError, waitForStreamedSpans, waitForTransaction } from '@sentry-internal/test-utils';
test('should create AI spans with correct attributes and error linking', async ({ page }) => {
const aiTransactionPromise = waitForTransaction('nextjs-16', async transactionEvent => {
return transactionEvent.transaction === 'GET /ai-error-test';
});
+ // gen_ai spans are extracted into a separate span v2 envelope item
+ const genAiSpansPromise = waitForStreamedSpans('nextjs-16', spans =>
+ spans.some(span => getSpanOp(span) === 'gen_ai.invoke_agent'),
+ );
+
const errorEventPromise = waitForError('nextjs-16', async errorEvent => {
return errorEvent.exception?.values?.[0]?.value?.includes('Tool call failed');
});
@@ -13,21 +18,20 @@ test('should create AI spans with correct attributes and error linking', async (
await page.goto('/ai-error-test');
const aiTransaction = await aiTransactionPromise;
+ const genAiSpans = await genAiSpansPromise;
const errorEvent = await errorEventPromise;
expect(aiTransaction).toBeDefined();
expect(aiTransaction.transaction).toBe('GET /ai-error-test');
- const spans = aiTransaction.spans || [];
-
// Each generateText call should create 2 spans: one for the pipeline and one for doGenerate
// Plus a span for the tool call
// TODO: For now, this is sadly not fully working - the monkey patching of the ai package is not working
// because of this, only spans that are manually opted-in at call time will be captured
// this may be fixed by https://github.com/vercel/ai/pull/6716 in the future
- const aiPipelineSpans = spans.filter(span => span.op === 'gen_ai.invoke_agent');
- const aiGenerateSpans = spans.filter(span => span.op === 'gen_ai.generate_content');
- const toolCallSpans = spans.filter(span => span.op === 'gen_ai.execute_tool');
+ const aiPipelineSpans = genAiSpans.filter(span => getSpanOp(span) === 'gen_ai.invoke_agent');
+ const aiGenerateSpans = genAiSpans.filter(span => getSpanOp(span) === 'gen_ai.generate_content');
+ const toolCallSpans = genAiSpans.filter(span => getSpanOp(span) === 'gen_ai.execute_tool');
expect(aiPipelineSpans.length).toBeGreaterThanOrEqual(1);
expect(aiGenerateSpans.length).toBeGreaterThanOrEqual(1);
diff --git a/dev-packages/e2e-tests/test-applications/nextjs-16/tests/ai-test.test.ts b/dev-packages/e2e-tests/test-applications/nextjs-16/tests/ai-test.test.ts
index dcd129020035..89af644b1f21 100644
--- a/dev-packages/e2e-tests/test-applications/nextjs-16/tests/ai-test.test.ts
+++ b/dev-packages/e2e-tests/test-applications/nextjs-16/tests/ai-test.test.ts
@@ -1,29 +1,33 @@
import { expect, test } from '@playwright/test';
-import { waitForTransaction } from '@sentry-internal/test-utils';
+import { getSpanOp, waitForStreamedSpans, waitForTransaction } from '@sentry-internal/test-utils';
test('should create AI spans with correct attributes', async ({ page }) => {
const aiTransactionPromise = waitForTransaction('nextjs-16', async transactionEvent => {
return transactionEvent.transaction === 'GET /ai-test';
});
+ // gen_ai spans are extracted into a separate span v2 envelope item
+ const genAiSpansPromise = waitForStreamedSpans('nextjs-16', spans =>
+ spans.some(span => getSpanOp(span) === 'gen_ai.invoke_agent'),
+ );
+
await page.goto('/ai-test');
const aiTransaction = await aiTransactionPromise;
+ const genAiSpans = await genAiSpansPromise;
expect(aiTransaction).toBeDefined();
expect(aiTransaction.transaction).toBe('GET /ai-test');
- const spans = aiTransaction.spans || [];
-
// We expect spans for the first 3 AI calls (4th is disabled)
// Each generateText call should create 2 spans: one for the pipeline and one for doGenerate
// Plus a span for the tool call
// TODO: For now, this is sadly not fully working - the monkey patching of the ai package is not working
// because of this, only spans that are manually opted-in at call time will be captured
// this may be fixed by https://github.com/vercel/ai/pull/6716 in the future
- const aiPipelineSpans = spans.filter(span => span.op === 'gen_ai.invoke_agent');
- const aiGenerateSpans = spans.filter(span => span.op === 'gen_ai.generate_content');
- const toolCallSpans = spans.filter(span => span.op === 'gen_ai.execute_tool');
+ const aiPipelineSpans = genAiSpans.filter(span => getSpanOp(span) === 'gen_ai.invoke_agent');
+ const aiGenerateSpans = genAiSpans.filter(span => getSpanOp(span) === 'gen_ai.generate_content');
+ const toolCallSpans = genAiSpans.filter(span => getSpanOp(span) === 'gen_ai.execute_tool');
expect(aiPipelineSpans.length).toBeGreaterThanOrEqual(1);
expect(aiGenerateSpans.length).toBeGreaterThanOrEqual(1);
@@ -31,35 +35,35 @@ test('should create AI spans with correct attributes', async ({ page }) => {
// First AI call - should have telemetry enabled and record inputs/outputs (sendDefaultPii: true)
/* const firstPipelineSpan = aiPipelineSpans[0];
- expect(firstPipelineSpan?.data?.['vercel.ai.model.id']).toBe('mock-model-id');
- expect(firstPipelineSpan?.data?.['vercel.ai.model.provider']).toBe('mock-provider');
- expect(firstPipelineSpan?.data?.['vercel.ai.prompt']).toContain('Where is the first span?');
- expect(firstPipelineSpan?.data?.['gen_ai.output.messages']).toContain('First span here!');
- expect(firstPipelineSpan?.data?.['gen_ai.usage.input_tokens']).toBe(10);
- expect(firstPipelineSpan?.data?.['gen_ai.usage.output_tokens']).toBe(20); */
+ expect(firstPipelineSpan?.attributes?.['vercel.ai.model.id']?.value).toBe('mock-model-id');
+ expect(firstPipelineSpan?.attributes?.['vercel.ai.model.provider']?.value).toBe('mock-provider');
+ expect(firstPipelineSpan?.attributes?.['vercel.ai.prompt']?.value).toContain('Where is the first span?');
+ expect(firstPipelineSpan?.attributes?.['gen_ai.output.messages']?.value).toContain('First span here!');
+ expect(firstPipelineSpan?.attributes?.['gen_ai.usage.input_tokens']?.value).toBe(10);
+ expect(firstPipelineSpan?.attributes?.['gen_ai.usage.output_tokens']?.value).toBe(20); */
// Second AI call - explicitly enabled telemetry
const secondPipelineSpan = aiPipelineSpans[0];
- expect(secondPipelineSpan?.data?.['vercel.ai.prompt']).toContain('Where is the second span?');
- expect(secondPipelineSpan?.data?.['gen_ai.output.messages']).toContain('Second span here!');
+ expect(secondPipelineSpan?.attributes?.['vercel.ai.prompt']?.value).toContain('Where is the second span?');
+ expect(secondPipelineSpan?.attributes?.['gen_ai.output.messages']?.value).toContain('Second span here!');
// Third AI call - with tool calls
/* const thirdPipelineSpan = aiPipelineSpans[2];
- expect(thirdPipelineSpan?.data?.['vercel.ai.response.finishReason']).toBe('tool-calls');
- expect(thirdPipelineSpan?.data?.['gen_ai.usage.input_tokens']).toBe(15);
- expect(thirdPipelineSpan?.data?.['gen_ai.usage.output_tokens']).toBe(25); */
+ expect(thirdPipelineSpan?.attributes?.['vercel.ai.response.finishReason']?.value).toBe('tool-calls');
+ expect(thirdPipelineSpan?.attributes?.['gen_ai.usage.input_tokens']?.value).toBe(15);
+ expect(thirdPipelineSpan?.attributes?.['gen_ai.usage.output_tokens']?.value).toBe(25); */
// Tool call span
/* const toolSpan = toolCallSpans[0];
- expect(toolSpan?.data?.['vercel.ai.toolCall.name']).toBe('getWeather');
- expect(toolSpan?.data?.['vercel.ai.toolCall.id']).toBe('call-1');
- expect(toolSpan?.data?.['vercel.ai.toolCall.args']).toContain('San Francisco');
- expect(toolSpan?.data?.['vercel.ai.toolCall.result']).toContain('Sunny, 72°F'); */
+ expect(toolSpan?.attributes?.['vercel.ai.toolCall.name']?.value).toBe('getWeather');
+ expect(toolSpan?.attributes?.['vercel.ai.toolCall.id']?.value).toBe('call-1');
+ expect(toolSpan?.attributes?.['vercel.ai.toolCall.args']?.value).toContain('San Francisco');
+ expect(toolSpan?.attributes?.['vercel.ai.toolCall.result']?.value).toContain('Sunny, 72°F'); */
// Verify the fourth call was not captured (telemetry disabled)
- const promptsInSpans = spans
- .map(span => span.data?.['vercel.ai.prompt'])
- .filter((prompt): prompt is string => prompt !== undefined);
+ const promptsInSpans = genAiSpans
+ .map(span => span.attributes?.['vercel.ai.prompt']?.value)
+ .filter((prompt): prompt is string => typeof prompt === 'string');
const hasDisabledPrompt = promptsInSpans.some(prompt => prompt.includes('Where is the third span?'));
expect(hasDisabledPrompt).toBe(false);
diff --git a/dev-packages/e2e-tests/test-applications/react-router-6/tsconfig.json b/dev-packages/e2e-tests/test-applications/react-router-6/tsconfig.json
index 4cc95dc2689a..74afe717c988 100644
--- a/dev-packages/e2e-tests/test-applications/react-router-6/tsconfig.json
+++ b/dev-packages/e2e-tests/test-applications/react-router-6/tsconfig.json
@@ -10,7 +10,7 @@
"forceConsistentCasingInFileNames": true,
"noFallthroughCasesInSwitch": true,
"module": "esnext",
- "moduleResolution": "node",
+ "moduleResolution": "bundler",
"resolveJsonModule": true,
"isolatedModules": true,
"noEmit": true,
diff --git a/dev-packages/e2e-tests/test-applications/react-send-to-sentry/tsconfig.json b/dev-packages/e2e-tests/test-applications/react-send-to-sentry/tsconfig.json
index 4cc95dc2689a..74afe717c988 100644
--- a/dev-packages/e2e-tests/test-applications/react-send-to-sentry/tsconfig.json
+++ b/dev-packages/e2e-tests/test-applications/react-send-to-sentry/tsconfig.json
@@ -10,7 +10,7 @@
"forceConsistentCasingInFileNames": true,
"noFallthroughCasesInSwitch": true,
"module": "esnext",
- "moduleResolution": "node",
+ "moduleResolution": "bundler",
"resolveJsonModule": true,
"isolatedModules": true,
"noEmit": true,
diff --git a/dev-packages/e2e-tests/test-applications/remix-hydrogen/package.json b/dev-packages/e2e-tests/test-applications/remix-hydrogen/package.json
index b51c2868f415..04b32ff7d222 100644
--- a/dev-packages/e2e-tests/test-applications/remix-hydrogen/package.json
+++ b/dev-packages/e2e-tests/test-applications/remix-hydrogen/package.json
@@ -19,7 +19,7 @@
"@remix-run/cloudflare-pages": "^2.17.4",
"@sentry/cloudflare": "file:../../packed/sentry-cloudflare-packed.tgz",
"@sentry/remix": "file:../../packed/sentry-remix-packed.tgz",
- "@sentry/vite-plugin": "^5.2.0",
+ "@sentry/vite-plugin": "^5.3.0",
"@shopify/hydrogen": "2025.4.0",
"@shopify/remix-oxygen": "2.0.10",
"graphql": "^16.6.0",
diff --git a/dev-packages/node-core-integration-tests/utils/runner.ts b/dev-packages/node-core-integration-tests/utils/runner.ts
index d27c65fc81be..69163754c89e 100644
--- a/dev-packages/node-core-integration-tests/utils/runner.ts
+++ b/dev-packages/node-core-integration-tests/utils/runner.ts
@@ -486,7 +486,8 @@ export function createRunner(...paths: string[]) {
if (process.env.DEBUG) log('stderr line', output);
- if (ensureNoErrorOutput) {
+ // Ignore deprecation warnings for this purpose
+ if (ensureNoErrorOutput && !`${output}`.includes('DeprecationWarning:')) {
complete(new Error(`Expected no error output but got: '${output}'`));
}
});
diff --git a/dev-packages/node-integration-tests/package.json b/dev-packages/node-integration-tests/package.json
index 0e3936dd9fb5..a8aa41b3c6a9 100644
--- a/dev-packages/node-integration-tests/package.json
+++ b/dev-packages/node-integration-tests/package.json
@@ -58,7 +58,7 @@
"generic-pool": "^3.9.0",
"graphql": "^16.11.0",
"graphql-tag": "^2.12.6",
- "hono": "^4.12.14",
+ "hono": "^4.12.18",
"http-terminator": "^3.2.0",
"ioredis": "^5.4.1",
"kafkajs": "2.2.4",
diff --git a/dev-packages/node-integration-tests/suites/integrations/console/filter/instrument.mjs b/dev-packages/node-integration-tests/suites/integrations/console/filter/instrument.mjs
new file mode 100644
index 000000000000..16c05ae32ed0
--- /dev/null
+++ b/dev-packages/node-integration-tests/suites/integrations/console/filter/instrument.mjs
@@ -0,0 +1,10 @@
+import * as Sentry from '@sentry/node';
+import { loggingTransport } from '@sentry-internal/node-integration-tests';
+
+Sentry.init({
+ dsn: 'https://public@dsn.ingest.sentry.io/1337',
+ release: '1.0',
+ transport: loggingTransport,
+ defaultIntegrations: false,
+ integrations: [Sentry.consoleIntegration({ filter: ['foo'] })],
+});
diff --git a/dev-packages/node-integration-tests/suites/integrations/console/filter/scenario.mjs b/dev-packages/node-integration-tests/suites/integrations/console/filter/scenario.mjs
new file mode 100644
index 000000000000..5d1e93582c0b
--- /dev/null
+++ b/dev-packages/node-integration-tests/suites/integrations/console/filter/scenario.mjs
@@ -0,0 +1,9 @@
+/* eslint-disable no-console */
+import * as Sentry from '@sentry/node';
+
+console.log('hello');
+console.log('foo');
+console.log('foo2');
+console.log('baz');
+
+Sentry.captureException(new Error('Test Error'));
diff --git a/dev-packages/node-integration-tests/suites/integrations/console/filter/test.ts b/dev-packages/node-integration-tests/suites/integrations/console/filter/test.ts
new file mode 100644
index 000000000000..decbe91dbb00
--- /dev/null
+++ b/dev-packages/node-integration-tests/suites/integrations/console/filter/test.ts
@@ -0,0 +1,44 @@
+import { afterAll, describe, expect } from 'vitest';
+import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../../utils/runner';
+
+describe('Console Integration', () => {
+ afterAll(() => {
+ cleanupChildProcesses();
+ });
+
+ createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument.mjs', (createRunner, test) => {
+ test('filters console messages', async () => {
+ const runner = createRunner()
+ .expect({
+ event: {
+ exception: {
+ values: [
+ {
+ value: 'Test Error',
+ },
+ ],
+ },
+ breadcrumbs: [
+ expect.objectContaining({
+ message: 'hello',
+ }),
+ expect.objectContaining({
+ message: 'baz',
+ }),
+ ],
+ },
+ })
+ .start();
+
+ await runner.completed();
+
+ expect(runner.getLogs()).toContainEqual('hello');
+ expect(runner.getLogs()).toContainEqual('baz');
+ expect(runner.getLogs()).not.toContainEqual('foo');
+ expect(runner.getLogs()).not.toContainEqual('foo2');
+
+    // Ensure deprecation warnings are not included
+ expect(runner.getLogs()).not.toContainEqual(expect.stringMatching('DeprecationWarning'));
+ });
+ });
+});
diff --git a/dev-packages/node-integration-tests/suites/tracing/anthropic/instrument-no-truncation.mjs b/dev-packages/node-integration-tests/suites/tracing/anthropic/instrument-no-truncation.mjs
index ce15aad4e8e1..52b29da36f16 100644
--- a/dev-packages/node-integration-tests/suites/tracing/anthropic/instrument-no-truncation.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/anthropic/instrument-no-truncation.mjs
@@ -21,4 +21,5 @@ Sentry.init({
}
return event;
},
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/anthropic/instrument-streaming-with-truncation.mjs b/dev-packages/node-integration-tests/suites/tracing/anthropic/instrument-streaming-with-truncation.mjs
index 9d8360708ab3..d72a9fce5121 100644
--- a/dev-packages/node-integration-tests/suites/tracing/anthropic/instrument-streaming-with-truncation.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/anthropic/instrument-streaming-with-truncation.mjs
@@ -13,4 +13,5 @@ Sentry.init({
enableTruncation: true,
}),
],
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/anthropic/instrument-streaming.mjs b/dev-packages/node-integration-tests/suites/tracing/anthropic/instrument-streaming.mjs
index 48a860c510c5..7db8f66f7c58 100644
--- a/dev-packages/node-integration-tests/suites/tracing/anthropic/instrument-streaming.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/anthropic/instrument-streaming.mjs
@@ -8,4 +8,5 @@ Sentry.init({
sendDefaultPii: true,
transport: loggingTransport,
traceLifecycle: 'stream',
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/anthropic/instrument-with-options.mjs b/dev-packages/node-integration-tests/suites/tracing/anthropic/instrument-with-options.mjs
index bbbefef79148..37199cf9030b 100644
--- a/dev-packages/node-integration-tests/suites/tracing/anthropic/instrument-with-options.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/anthropic/instrument-with-options.mjs
@@ -20,4 +20,5 @@ Sentry.init({
}
return event;
},
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/anthropic/instrument-with-pii.mjs b/dev-packages/node-integration-tests/suites/tracing/anthropic/instrument-with-pii.mjs
index 8c6bbcc3ce0a..fe6a18ba32dc 100644
--- a/dev-packages/node-integration-tests/suites/tracing/anthropic/instrument-with-pii.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/anthropic/instrument-with-pii.mjs
@@ -14,4 +14,5 @@ Sentry.init({
}
return event;
},
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/anthropic/instrument.mjs b/dev-packages/node-integration-tests/suites/tracing/anthropic/instrument.mjs
index 2b8a197791e2..41b37b7fc409 100644
--- a/dev-packages/node-integration-tests/suites/tracing/anthropic/instrument.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/anthropic/instrument.mjs
@@ -14,4 +14,5 @@ Sentry.init({
}
return event;
},
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts b/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts
index e740c24071fd..5071e62af9d4 100644
--- a/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/anthropic/test.ts
@@ -1,4 +1,3 @@
-import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
import { afterAll, describe, expect } from 'vitest';
import {
GEN_AI_INPUT_MESSAGES_ATTRIBUTE,
@@ -30,305 +29,14 @@ describe('Anthropic integration', () => {
const EXPECTED_TRANSACTION_DEFAULT_PII_FALSE = {
transaction: 'main',
- spans: expect.arrayContaining([
- // First span - basic message completion without PII
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
- [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
- [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'msg_mock123',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
- }),
- description: 'chat claude-3-haiku-20240307',
- op: 'gen_ai.chat',
- origin: 'auto.ai.anthropic',
- status: 'ok',
- }),
- // Second span - error handling
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
- }),
- description: 'chat error-model',
- op: 'gen_ai.chat',
- origin: 'auto.ai.anthropic',
- status: 'internal_error',
- }),
- // Third span - token counting (no response.text because recordOutputs=false by default)
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
- }),
- description: 'chat claude-3-haiku-20240307',
- op: 'gen_ai.chat',
- origin: 'auto.ai.anthropic',
- status: 'ok',
- }),
- // Fourth span - models.retrieve
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'claude-3-haiku-20240307',
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
- }),
- description: 'models claude-3-haiku-20240307',
- op: 'gen_ai.models',
- origin: 'auto.ai.anthropic',
- status: 'ok',
- }),
- ]),
};
const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE = {
transaction: 'main',
- spans: expect.arrayContaining([
- // First span - basic message completion with PII
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the capital of France?"}]',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
- [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'msg_mock123',
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
- [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Hello from Anthropic mock!',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
- }),
- description: 'chat claude-3-haiku-20240307',
- op: 'gen_ai.chat',
- origin: 'auto.ai.anthropic',
- status: 'ok',
- }),
- expect.objectContaining({
- data: expect.objectContaining({
- 'http.request.method': 'POST',
- 'http.request.method_original': 'POST',
- 'http.response.status_code': 200,
- 'otel.kind': 'CLIENT',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'http.client',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.http.otel.node_fetch',
- 'url.path': '/anthropic/v1/messages',
- 'url.query': '',
- 'url.scheme': 'http',
- }),
- op: 'http.client',
- origin: 'auto.http.otel.node_fetch',
- status: 'ok',
- }),
-
- // Second - error handling with PII
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"This will fail"}]',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
- }),
- description: 'chat error-model',
- op: 'gen_ai.chat',
- origin: 'auto.ai.anthropic',
- status: 'internal_error',
- }),
- expect.objectContaining({
- data: expect.objectContaining({
- 'http.request.method': 'POST',
- 'http.request.method_original': 'POST',
- 'http.response.status_code': 404,
- 'otel.kind': 'CLIENT',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'http.client',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.http.otel.node_fetch',
- 'url.path': '/anthropic/v1/messages',
- 'url.query': '',
- 'url.scheme': 'http',
- }),
- op: 'http.client',
- origin: 'auto.http.otel.node_fetch',
- status: 'not_found',
- }),
-
- // Third - token counting with PII (response.text is present because sendDefaultPii=true enables recordOutputs)
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the capital of France?"}]',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
- [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: '15',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
- }),
- description: 'chat claude-3-haiku-20240307',
- op: 'gen_ai.chat',
- origin: 'auto.ai.anthropic',
- status: 'ok',
- }),
- expect.objectContaining({
- data: expect.objectContaining({
- 'http.request.method': 'POST',
- 'http.request.method_original': 'POST',
- 'http.response.status_code': 200,
- 'otel.kind': 'CLIENT',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'http.client',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.http.otel.node_fetch',
- 'url.path': '/anthropic/v1/messages/count_tokens',
- 'url.query': '',
- 'url.scheme': 'http',
- }),
- op: 'http.client',
- origin: 'auto.http.otel.node_fetch',
- status: 'ok',
- }),
-
- // Fourth - models.retrieve with PII
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'claude-3-haiku-20240307',
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.models',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
- }),
- description: 'models claude-3-haiku-20240307',
- op: 'gen_ai.models',
- origin: 'auto.ai.anthropic',
- status: 'ok',
- }),
- expect.objectContaining({
- data: expect.objectContaining({
- 'http.request.method': 'GET',
- 'http.request.method_original': 'GET',
- 'http.response.status_code': 200,
- 'otel.kind': 'CLIENT',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'http.client',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.http.otel.node_fetch',
- 'url.path': '/anthropic/v1/models/claude-3-haiku-20240307',
- 'url.query': '',
- 'url.scheme': 'http',
- 'user_agent.original': 'Anthropic/JS 0.63.0',
- }),
- op: 'http.client',
- origin: 'auto.http.otel.node_fetch',
- status: 'ok',
- }),
-
- // Fifth - messages.create with stream: true
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the capital of France?"}]',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
- [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'msg_stream123',
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
- [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
- [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Hello from stream!',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
- }),
- description: 'chat claude-3-haiku-20240307',
- op: 'gen_ai.chat',
- origin: 'auto.ai.anthropic',
- status: 'ok',
- }),
- expect.objectContaining({
- data: expect.objectContaining({
- 'http.request.method': 'POST',
- 'http.request.method_original': 'POST',
- 'http.response.status_code': 200,
- 'otel.kind': 'CLIENT',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'http.client',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.http.otel.node_fetch',
- 'url.path': '/anthropic/v1/messages',
- 'url.query': '',
- 'url.scheme': 'http',
- 'user_agent.original': 'Anthropic/JS 0.63.0',
- }),
- op: 'http.client',
- origin: 'auto.http.otel.node_fetch',
- status: 'ok',
- }),
-
- // Sixth - messages.stream
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
- [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
- }),
- description: 'chat claude-3-haiku-20240307',
- op: 'gen_ai.chat',
- origin: 'auto.ai.anthropic',
- status: 'ok',
- }),
- ]),
};
const EXPECTED_TRANSACTION_WITH_OPTIONS = {
transaction: 'main',
- spans: expect.arrayContaining([
- // Check that custom options are respected
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true
- [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text when recordOutputs: true
- }),
- }),
- // Check token counting with options
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true
- [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: '15', // Present because recordOutputs=true is set in options
- }),
- op: 'gen_ai.chat',
- }),
- // Check models.retrieve with options
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'models',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'claude-3-haiku-20240307',
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
- }),
- op: 'gen_ai.models',
- description: 'models claude-3-haiku-20240307',
- }),
- ]),
};
const EXPECTED_MODEL_ERROR = {
@@ -351,35 +59,93 @@ describe('Anthropic integration', () => {
await createRunner()
.ignore('event')
.expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_FALSE })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(4);
+ const completionSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'msg_mock123',
+ );
+ expect(completionSpan).toBeDefined();
+ expect(completionSpan!.name).toBe('chat claude-3-haiku-20240307');
+ expect(completionSpan!.status).toBe('ok');
+ expect(completionSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('chat');
+ expect(completionSpan!.attributes['sentry.op'].value).toBe('gen_ai.chat');
+ expect(completionSpan!.attributes['sentry.origin'].value).toBe('auto.ai.anthropic');
+ expect(completionSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('anthropic');
+ expect(completionSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('claude-3-haiku-20240307');
+ expect(completionSpan!.attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE].value).toBe(0.7);
+ expect(completionSpan!.attributes[GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE].value).toBe(100);
+ expect(completionSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE].value).toBe('claude-3-haiku-20240307');
+ expect(completionSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(10);
+ expect(completionSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(15);
+ expect(completionSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE].value).toBe(25);
+
+ const errorSpan = container.items.find(span => span.name === 'chat error-model');
+ expect(errorSpan).toBeDefined();
+ expect(errorSpan!.status).toBe('error');
+ expect(errorSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('chat');
+ expect(errorSpan!.attributes['sentry.op'].value).toBe('gen_ai.chat');
+ expect(errorSpan!.attributes['sentry.origin'].value).toBe('auto.ai.anthropic');
+ expect(errorSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('anthropic');
+ expect(errorSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('error-model');
+
+ const tokenCountingSpan = container.items.find(
+ span =>
+ span.name === 'chat claude-3-haiku-20240307' &&
+ span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE] === undefined,
+ );
+ expect(tokenCountingSpan).toBeDefined();
+ expect(tokenCountingSpan!.status).toBe('ok');
+ expect(tokenCountingSpan!.attributes['sentry.op'].value).toBe('gen_ai.chat');
+ expect(tokenCountingSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('claude-3-haiku-20240307');
+
+ const modelsSpan = container.items.find(span => span.name === 'models claude-3-haiku-20240307');
+ expect(modelsSpan).toBeDefined();
+ expect(modelsSpan!.status).toBe('ok');
+ expect(modelsSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('models');
+ expect(modelsSpan!.attributes['sentry.op'].value).toBe('gen_ai.models');
+ expect(modelsSpan!.attributes['sentry.origin'].value).toBe('auto.ai.anthropic');
+ expect(modelsSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('anthropic');
+ expect(modelsSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('claude-3-haiku-20240307');
+ expect(modelsSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE].value).toBe('claude-3-haiku-20240307');
+ expect(modelsSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE].value).toBe('claude-3-haiku-20240307');
+ },
+ })
.start()
.completed();
});
});
createEsmAndCjsTests(__dirname, 'scenario-with-response.mjs', 'instrument.mjs', (createRunner, test) => {
- const chatSpan = (responseId: string) =>
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: responseId,
- }),
- description: 'chat claude-3-haiku-20240307',
- op: 'gen_ai.chat',
- status: 'ok',
- });
-
test('preserves .withResponse() and .asResponse() for non-streaming and streaming', async () => {
await createRunner()
.ignore('event')
.expect({
transaction: {
transaction: 'main',
- spans: expect.arrayContaining([
- chatSpan('msg_withresponse'),
- chatSpan('msg_withresponse'),
- chatSpan('msg_stream_withresponse'),
- ]),
+ },
+ })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(3);
+ const nonStreamingSpans = container.items.filter(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'msg_withresponse',
+ );
+ expect(nonStreamingSpans).toHaveLength(2);
+ for (const span of nonStreamingSpans) {
+ expect(span.name).toBe('chat claude-3-haiku-20240307');
+ expect(span.status).toBe('ok');
+ expect(span.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('chat');
+ expect(span.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('claude-3-haiku-20240307');
+ }
+
+ const streamingSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'msg_stream_withresponse',
+ );
+ expect(streamingSpan).toBeDefined();
+ expect(streamingSpan!.name).toBe('chat claude-3-haiku-20240307');
+ expect(streamingSpan!.status).toBe('ok');
+ expect(streamingSpan!.attributes[GEN_AI_RESPONSE_STREAMING_ATTRIBUTE].value).toBe(true);
},
})
.start()
@@ -392,6 +158,43 @@ describe('Anthropic integration', () => {
await createRunner()
.expect({ event: EXPECTED_MODEL_ERROR })
.expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_FALSE })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(5);
+ const completionSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'msg_mock123',
+ );
+ expect(completionSpan).toBeDefined();
+ expect(completionSpan!.name).toBe('chat claude-3-haiku-20240307');
+ expect(completionSpan!.status).toBe('ok');
+
+ const errorSpan = container.items.find(span => span.name === 'chat error-model');
+ expect(errorSpan).toBeDefined();
+ expect(errorSpan!.status).toBe('error');
+
+ const tokenCountingSpan = container.items.find(
+ span =>
+ span.name === 'chat claude-3-haiku-20240307' &&
+ span.status === 'ok' &&
+ span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE] === undefined,
+ );
+ expect(tokenCountingSpan).toBeDefined();
+ expect(tokenCountingSpan!.attributes['sentry.op'].value).toBe('gen_ai.chat');
+
+ const modelsSpan = container.items.find(span => span.name === 'models claude-3-haiku-20240307');
+ expect(modelsSpan).toBeDefined();
+ expect(modelsSpan!.status).toBe('ok');
+ expect(modelsSpan!.attributes['sentry.op'].value).toBe('gen_ai.models');
+
+ const streamingSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'msg_stream123',
+ );
+ expect(streamingSpan).toBeDefined();
+ expect(streamingSpan!.name).toBe('chat claude-3-haiku-20240307');
+ expect(streamingSpan!.status).toBe('ok');
+ expect(streamingSpan!.attributes[GEN_AI_RESPONSE_STREAMING_ATTRIBUTE].value).toBe(true);
+ },
+ })
.expect({ event: EXPECTED_STREAM_EVENT_HANDLER_MESSAGE })
.start()
.completed();
@@ -403,6 +206,72 @@ describe('Anthropic integration', () => {
await createRunner()
.expect({ event: EXPECTED_MODEL_ERROR })
.expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_TRUE })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(5);
+ const completionSpan = container.items.find(
+ span =>
+ span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]?.value ===
+ '[{"role":"user","content":"What is the capital of France?"}]',
+ );
+ expect(completionSpan).toBeDefined();
+ expect(completionSpan!.name).toBe('chat claude-3-haiku-20240307');
+ expect(completionSpan!.status).toBe('ok');
+ expect(completionSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('chat');
+ expect(completionSpan!.attributes[GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE].value).toBe(100);
+ expect(completionSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('claude-3-haiku-20240307');
+ expect(completionSpan!.attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE].value).toBe(0.7);
+ expect(completionSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE].value).toBe('claude-3-haiku-20240307');
+ expect(completionSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE].value).toBe('msg_mock123');
+ expect(completionSpan!.attributes[GEN_AI_RESPONSE_TEXT_ATTRIBUTE].value).toBe('Hello from Anthropic mock!');
+ expect(completionSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('anthropic');
+ expect(completionSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(10);
+ expect(completionSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(15);
+ expect(completionSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE].value).toBe(25);
+ expect(completionSpan!.attributes['sentry.op'].value).toBe('gen_ai.chat');
+ expect(completionSpan!.attributes['sentry.origin'].value).toBe('auto.ai.anthropic');
+
+ const errorSpan = container.items.find(
+ span =>
+ span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]?.value ===
+ '[{"role":"user","content":"This will fail"}]',
+ );
+ expect(errorSpan).toBeDefined();
+ expect(errorSpan!.name).toBe('chat error-model');
+ expect(errorSpan!.status).toBe('error');
+ expect(errorSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('error-model');
+
+ const tokenCountingSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]?.value === '15',
+ );
+ expect(tokenCountingSpan).toBeDefined();
+ expect(tokenCountingSpan!.name).toBe('chat claude-3-haiku-20240307');
+ expect(tokenCountingSpan!.status).toBe('ok');
+ expect(tokenCountingSpan!.attributes['sentry.op'].value).toBe('gen_ai.chat');
+
+ const modelsSpan = container.items.find(span => span.name === 'models claude-3-haiku-20240307');
+ expect(modelsSpan).toBeDefined();
+ expect(modelsSpan!.status).toBe('ok');
+ expect(modelsSpan!.attributes['sentry.op'].value).toBe('gen_ai.models');
+
+ // TODO: messages.stream() should produce its own distinct gen_ai span, but it
+ // currently does not (pre-existing bug). Once fixed, add an additional indexed span assertion.
+ const streamingSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'msg_stream123',
+ );
+ expect(streamingSpan).toBeDefined();
+ expect(streamingSpan!.name).toBe('chat claude-3-haiku-20240307');
+ expect(streamingSpan!.status).toBe('ok');
+ expect(streamingSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('chat');
+ expect(streamingSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('claude-3-haiku-20240307');
+ expect(streamingSpan!.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE].value).toBe(true);
+ expect(streamingSpan!.attributes[GEN_AI_RESPONSE_STREAMING_ATTRIBUTE].value).toBe(true);
+ expect(streamingSpan!.attributes[GEN_AI_RESPONSE_TEXT_ATTRIBUTE].value).toBe('Hello from stream!');
+ expect(streamingSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(10);
+ expect(streamingSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(15);
+ expect(streamingSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE].value).toBe(25);
+ },
+ })
.expect({ event: EXPECTED_STREAM_EVENT_HANDLER_MESSAGE })
.start()
.completed();
@@ -414,6 +283,52 @@ describe('Anthropic integration', () => {
await createRunner()
.expect({ event: EXPECTED_MODEL_ERROR })
.expect({ transaction: EXPECTED_TRANSACTION_WITH_OPTIONS })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(5);
+ const completionSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'msg_mock123',
+ );
+ expect(completionSpan).toBeDefined();
+ expect(completionSpan!.name).toBe('chat claude-3-haiku-20240307');
+ expect(completionSpan!.status).toBe('ok');
+ expect(completionSpan!.attributes['sentry.op'].value).toBe('gen_ai.chat');
+ expect(completionSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toBeDefined();
+ expect(completionSpan!.attributes[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]).toBeDefined();
+
+ const errorSpan = container.items.find(span => span.name === 'chat error-model');
+ expect(errorSpan).toBeDefined();
+ expect(errorSpan!.status).toBe('error');
+ expect(errorSpan!.attributes['sentry.op'].value).toBe('gen_ai.chat');
+
+ const tokenCountingSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]?.value === '15',
+ );
+ expect(tokenCountingSpan).toBeDefined();
+ expect(tokenCountingSpan!.name).toBe('chat claude-3-haiku-20240307');
+ expect(tokenCountingSpan!.status).toBe('ok');
+ expect(tokenCountingSpan!.attributes['sentry.op'].value).toBe('gen_ai.chat');
+ expect(tokenCountingSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('chat');
+
+ const modelsSpan = container.items.find(span => span.name === 'models claude-3-haiku-20240307');
+ expect(modelsSpan).toBeDefined();
+ expect(modelsSpan!.status).toBe('ok');
+ expect(modelsSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('models');
+ expect(modelsSpan!.attributes['sentry.op'].value).toBe('gen_ai.models');
+ expect(modelsSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('anthropic');
+ expect(modelsSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('claude-3-haiku-20240307');
+ expect(modelsSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE].value).toBe('claude-3-haiku-20240307');
+ expect(modelsSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE].value).toBe('claude-3-haiku-20240307');
+
+ const streamingSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'msg_stream123',
+ );
+ expect(streamingSpan).toBeDefined();
+ expect(streamingSpan!.name).toBe('chat claude-3-haiku-20240307');
+ expect(streamingSpan!.status).toBe('ok');
+ expect(streamingSpan!.attributes[GEN_AI_RESPONSE_STREAMING_ATTRIBUTE].value).toBe(true);
+ },
+ })
.expect({ event: EXPECTED_STREAM_EVENT_HANDLER_MESSAGE })
.start()
.completed();
@@ -422,101 +337,106 @@ describe('Anthropic integration', () => {
const EXPECTED_STREAM_SPANS_PII_FALSE = {
transaction: 'main',
- spans: expect.arrayContaining([
- // messages.create with stream: true
- expect.objectContaining({
- description: 'chat claude-3-haiku-20240307',
- op: 'gen_ai.chat',
- data: expect.objectContaining({
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
- [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
- [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'msg_stream_1',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["end_turn"]',
- }),
- }),
- // messages.stream
- expect.objectContaining({
- description: 'chat claude-3-haiku-20240307',
- op: 'gen_ai.chat',
- data: expect.objectContaining({
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
- [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'msg_stream_1',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
- }),
- }),
- // messages.stream with redundant stream: true param
- expect.objectContaining({
- description: 'chat claude-3-haiku-20240307',
- op: 'gen_ai.chat',
- data: expect.objectContaining({
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
- [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
- [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'msg_stream_1',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
- }),
- }),
- ]),
};
const EXPECTED_STREAM_SPANS_PII_TRUE = {
transaction: 'main',
- spans: expect.arrayContaining([
- expect.objectContaining({
- description: 'chat claude-3-haiku-20240307',
- op: 'gen_ai.chat',
- data: expect.objectContaining({
- [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
- // streamed text concatenated
- [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Hello from stream!',
- }),
- }),
- expect.objectContaining({
- description: 'chat claude-3-haiku-20240307',
- op: 'gen_ai.chat',
- data: expect.objectContaining({
- [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
- [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Hello from stream!',
- }),
- }),
- expect.objectContaining({
- description: 'chat claude-3-haiku-20240307',
- op: 'gen_ai.chat',
- data: expect.objectContaining({
- [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
- [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Hello from stream!',
- }),
- }),
- ]),
};
createEsmAndCjsTests(__dirname, 'scenario-stream.mjs', 'instrument.mjs', (createRunner, test) => {
test('streams produce spans with token usage and metadata (PII false)', async () => {
- await createRunner().ignore('event').expect({ transaction: EXPECTED_STREAM_SPANS_PII_FALSE }).start().completed();
+ await createRunner()
+ .ignore('event')
+ .expect({ transaction: EXPECTED_STREAM_SPANS_PII_FALSE })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(3);
+ const requestStreamSpans = container.items.filter(
+ span => span.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE]?.value === true,
+ );
+ expect(requestStreamSpans).toHaveLength(2);
+ for (const span of requestStreamSpans) {
+ expect(span.name).toBe('chat claude-3-haiku-20240307');
+ expect(span.status).toBe('ok');
+ expect(span.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE].value).toBe(true);
+ expect(span.attributes[GEN_AI_RESPONSE_STREAMING_ATTRIBUTE].value).toBe(true);
+ expect(span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE].value).toBe('msg_stream_1');
+ }
+
+ const detailedStreamSpan = requestStreamSpans.find(
+ span => span.attributes[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]?.value === '["end_turn"]',
+ );
+ expect(detailedStreamSpan).toBeDefined();
+ expect(detailedStreamSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('anthropic');
+ expect(detailedStreamSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('chat');
+ expect(detailedStreamSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe(
+ 'claude-3-haiku-20240307',
+ );
+ expect(detailedStreamSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE].value).toBe(
+ 'claude-3-haiku-20240307',
+ );
+ expect(detailedStreamSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(10);
+ expect(detailedStreamSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(15);
+ expect(detailedStreamSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE].value).toBe(25);
+
+ const messagesStreamSpan = container.items.find(
+ span => span.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE] === undefined,
+ );
+ expect(messagesStreamSpan).toBeDefined();
+ expect(messagesStreamSpan!.name).toBe('chat claude-3-haiku-20240307');
+ expect(messagesStreamSpan!.status).toBe('ok');
+ expect(messagesStreamSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('anthropic');
+ expect(messagesStreamSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('chat');
+ expect(messagesStreamSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe(
+ 'claude-3-haiku-20240307',
+ );
+ expect(messagesStreamSpan!.attributes[GEN_AI_RESPONSE_STREAMING_ATTRIBUTE].value).toBe(true);
+ expect(messagesStreamSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE].value).toBe(
+ 'claude-3-haiku-20240307',
+ );
+ expect(messagesStreamSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE].value).toBe('msg_stream_1');
+ expect(messagesStreamSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(10);
+ expect(messagesStreamSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(15);
+ expect(messagesStreamSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE].value).toBe(25);
+ },
+ })
+ .start()
+ .completed();
});
});
createEsmAndCjsTests(__dirname, 'scenario-stream.mjs', 'instrument-with-pii.mjs', (createRunner, test) => {
test('streams record response text when PII true', async () => {
- await createRunner().ignore('event').expect({ transaction: EXPECTED_STREAM_SPANS_PII_TRUE }).start().completed();
+ await createRunner()
+ .ignore('event')
+ .expect({ transaction: EXPECTED_STREAM_SPANS_PII_TRUE })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(3);
+ const requestStreamSpans = container.items.filter(
+ span => span.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE]?.value === true,
+ );
+ expect(requestStreamSpans).toHaveLength(2);
+ for (const span of requestStreamSpans) {
+ expect(span.name).toBe('chat claude-3-haiku-20240307');
+ expect(span.status).toBe('ok');
+ expect(span.attributes[GEN_AI_RESPONSE_STREAMING_ATTRIBUTE].value).toBe(true);
+ expect(span.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE].value).toBe(true);
+ expect(span.attributes[GEN_AI_RESPONSE_TEXT_ATTRIBUTE].value).toBe('Hello from stream!');
+ }
+
+ const messagesStreamSpan = container.items.find(
+ span => span.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE] === undefined,
+ );
+ expect(messagesStreamSpan).toBeDefined();
+ expect(messagesStreamSpan!.name).toBe('chat claude-3-haiku-20240307');
+ expect(messagesStreamSpan!.status).toBe('ok');
+ expect(messagesStreamSpan!.attributes[GEN_AI_RESPONSE_STREAMING_ATTRIBUTE].value).toBe(true);
+ expect(messagesStreamSpan!.attributes[GEN_AI_RESPONSE_TEXT_ATTRIBUTE].value).toBe('Hello from stream!');
+ },
+ })
+ .start()
+ .completed();
});
});
@@ -530,16 +450,19 @@ describe('Anthropic integration', () => {
await createRunner()
.ignore('event')
.expect({
- transaction: {
- spans: expect.arrayContaining([
- expect.objectContaining({
- op: 'gen_ai.chat',
- data: expect.objectContaining({
- [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: EXPECTED_TOOLS_JSON,
- [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: EXPECTED_TOOL_CALLS_JSON,
- }),
- }),
- ]),
+ transaction: {},
+ })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(1);
+ const [firstSpan] = container.items;
+
+ // [0] messages.create with tools — available tools + tool calls recorded with PII
+ expect(firstSpan!.name).toBe('chat claude-3-haiku-20240307');
+ expect(firstSpan!.status).toBe('ok');
+ expect(firstSpan!.attributes['sentry.op'].value).toBe('gen_ai.chat');
+ expect(firstSpan!.attributes[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE].value).toBe(EXPECTED_TOOLS_JSON);
+ expect(firstSpan!.attributes[GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE].value).toBe(EXPECTED_TOOL_CALLS_JSON);
},
})
.start()
@@ -557,17 +480,28 @@ describe('Anthropic integration', () => {
await createRunner()
.ignore('event')
.expect({
- transaction: {
- spans: expect.arrayContaining([
- expect.objectContaining({
- description: 'chat claude-3-haiku-20240307',
- op: 'gen_ai.chat',
- data: expect.objectContaining({
- [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: EXPECTED_TOOLS_JSON,
- [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: EXPECTED_TOOL_CALLS_JSON,
- }),
- }),
- ]),
+ transaction: {},
+ })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(2);
+ const streamingToolSpan = container.items.find(span => span.status === 'ok');
+ expect(streamingToolSpan).toBeDefined();
+ expect(streamingToolSpan!.name).toBe('chat claude-3-haiku-20240307');
+ expect(streamingToolSpan!.attributes['sentry.op'].value).toBe('gen_ai.chat');
+ expect(streamingToolSpan!.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE].value).toBe(true);
+ expect(streamingToolSpan!.attributes[GEN_AI_RESPONSE_STREAMING_ATTRIBUTE].value).toBe(true);
+ expect(streamingToolSpan!.attributes[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE].value).toBe(
+ EXPECTED_TOOLS_JSON,
+ );
+ expect(streamingToolSpan!.attributes[GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE].value).toBe(
+ EXPECTED_TOOL_CALLS_JSON,
+ );
+
+ const errorSpan = container.items.find(span => span.status === 'error');
+ expect(errorSpan).toBeDefined();
+ expect(errorSpan!.name).toBe('chat claude-3-haiku-20240307');
+ expect(errorSpan!.attributes['sentry.op'].value).toBe('gen_ai.chat');
},
})
.start()
@@ -578,97 +512,93 @@ describe('Anthropic integration', () => {
// Additional error scenarios - Streaming errors
const EXPECTED_STREAM_ERROR_SPANS = {
transaction: 'main',
- spans: expect.arrayContaining([
- // Error with messages.create on stream initialization
- expect.objectContaining({
- description: 'chat error-stream-init',
- op: 'gen_ai.chat',
- status: 'internal_error', // Actual status coming from the instrumentation
- data: expect.objectContaining({
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-stream-init',
- [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
- }),
- }),
- // Error with messages.stream on stream initialization
- expect.objectContaining({
- description: 'chat error-stream-init',
- op: 'gen_ai.chat',
- status: 'internal_error', // Actual status coming from the instrumentation
- data: expect.objectContaining({
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-stream-init',
- }),
- }),
- // Error midway with messages.create on streaming - note: The stream is started successfully
- // so we get a successful span with the content that was streamed before the error
- expect.objectContaining({
- description: 'chat error-stream-midway',
- op: 'gen_ai.chat',
- status: 'ok',
- data: expect.objectContaining({
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-stream-midway',
- [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
- [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
- [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'This stream will ', // We received some data before error
- }),
- }),
- // Error midway with messages.stream - same behavior, we get a span with the streamed data
- expect.objectContaining({
- description: 'chat error-stream-midway',
- op: 'gen_ai.chat',
- status: 'ok',
- data: expect.objectContaining({
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-stream-midway',
- [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
- [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'This stream will ', // We received some data before error
- }),
- }),
- ]),
};
createEsmAndCjsTests(__dirname, 'scenario-stream-errors.mjs', 'instrument-with-pii.mjs', (createRunner, test) => {
test('handles streaming errors correctly', async () => {
- await createRunner().ignore('event').expect({ transaction: EXPECTED_STREAM_ERROR_SPANS }).start().completed();
+ await createRunner()
+ .ignore('event')
+ .expect({ transaction: EXPECTED_STREAM_ERROR_SPANS })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(4);
+ const createInitErrorSpan = container.items.find(
+ span =>
+ span.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]?.value === 'error-stream-init' &&
+ span.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE]?.value === true,
+ );
+ expect(createInitErrorSpan).toBeDefined();
+ expect(createInitErrorSpan!.name).toBe('chat error-stream-init');
+ expect(createInitErrorSpan!.status).toBe('error');
+
+ const streamInitErrorSpan = container.items.find(
+ span =>
+ span.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]?.value === 'error-stream-init' &&
+ span.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE] === undefined,
+ );
+ expect(streamInitErrorSpan).toBeDefined();
+ expect(streamInitErrorSpan!.name).toBe('chat error-stream-init');
+ expect(streamInitErrorSpan!.status).toBe('error');
+
+ const createMidwayErrorSpan = container.items.find(
+ span =>
+ span.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]?.value === 'error-stream-midway' &&
+ span.status === 'ok',
+ );
+ expect(createMidwayErrorSpan).toBeDefined();
+ expect(createMidwayErrorSpan!.name).toBe('chat error-stream-midway');
+ expect(createMidwayErrorSpan!.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE].value).toBe(true);
+ expect(createMidwayErrorSpan!.attributes[GEN_AI_RESPONSE_STREAMING_ATTRIBUTE].value).toBe(true);
+ expect(createMidwayErrorSpan!.attributes[GEN_AI_RESPONSE_TEXT_ATTRIBUTE].value).toBe('This stream will ');
+
+ const streamMidwayErrorSpan = container.items.find(
+ span =>
+ span.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]?.value === 'error-stream-midway' &&
+ span.status === 'error',
+ );
+ expect(streamMidwayErrorSpan).toBeDefined();
+ expect(streamMidwayErrorSpan!.name).toBe('chat error-stream-midway');
+ expect(streamMidwayErrorSpan!.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE]).toBeUndefined();
+ },
+ })
+ .start()
+ .completed();
});
});
// Additional error scenarios - Tool errors and model retrieval errors
const EXPECTED_ERROR_SPANS = {
transaction: 'main',
- spans: expect.arrayContaining([
- // Invalid tool format error
- expect.objectContaining({
- description: 'chat invalid-format',
- op: 'gen_ai.chat',
- status: 'internal_error',
- data: expect.objectContaining({
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'invalid-format',
- }),
- }),
- // Model retrieval error
- expect.objectContaining({
- description: 'models nonexistent-model',
- op: 'gen_ai.models',
- status: 'internal_error',
- data: expect.objectContaining({
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'nonexistent-model',
- }),
- }),
- // Successful tool usage (for comparison)
- expect.objectContaining({
- description: 'chat claude-3-haiku-20240307',
- op: 'gen_ai.chat',
- status: 'ok',
- data: expect.objectContaining({
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
- [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.stringContaining('tool_ok_1'),
- }),
- }),
- ]),
};
createEsmAndCjsTests(__dirname, 'scenario-errors.mjs', 'instrument-with-pii.mjs', (createRunner, test) => {
test('handles tool errors and model retrieval errors correctly', async () => {
- await createRunner().ignore('event').expect({ transaction: EXPECTED_ERROR_SPANS }).start().completed();
+ await createRunner()
+ .ignore('event')
+ .expect({ transaction: EXPECTED_ERROR_SPANS })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(3);
+ const invalidFormatSpan = container.items.find(span => span.name === 'chat invalid-format');
+ expect(invalidFormatSpan).toBeDefined();
+ expect(invalidFormatSpan!.status).toBe('error');
+ expect(invalidFormatSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('invalid-format');
+ expect(invalidFormatSpan!.attributes['sentry.op'].value).toBe('gen_ai.chat');
+
+ const modelErrorSpan = container.items.find(span => span.name === 'models nonexistent-model');
+ expect(modelErrorSpan).toBeDefined();
+ expect(modelErrorSpan!.status).toBe('error');
+ expect(modelErrorSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('nonexistent-model');
+ expect(modelErrorSpan!.attributes['sentry.op'].value).toBe('gen_ai.models');
+
+ const toolSuccessSpan = container.items.find(span => span.name === 'chat claude-3-haiku-20240307');
+ expect(toolSuccessSpan).toBeDefined();
+ expect(toolSuccessSpan!.status).toBe('ok');
+ expect(toolSuccessSpan!.attributes[GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE].value).toContain('tool_ok_1');
+ },
+ })
+ .start()
+ .completed();
});
});
@@ -683,44 +613,43 @@ describe('Anthropic integration', () => {
.expect({
transaction: {
transaction: 'main',
- spans: expect.arrayContaining([
- // First call: Last message is large and gets truncated (only C's remain, D's are cropped)
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 3,
- // Messages should be present (truncation happened) and should be a JSON array
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/),
- }),
- description: 'chat claude-3-haiku-20240307',
- op: 'gen_ai.chat',
- origin: 'auto.ai.anthropic',
- status: 'ok',
- }),
- // Second call: Last message is small and kept without truncation
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 3,
- // Small message should be kept intact
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: JSON.stringify([
- { role: 'user', content: 'This is a small message that fits within the limit' },
- ]),
- }),
- description: 'chat claude-3-haiku-20240307',
- op: 'gen_ai.chat',
- origin: 'auto.ai.anthropic',
- status: 'ok',
- }),
- ]),
+ },
+ })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(2);
+ const smallMsgValue = JSON.stringify([
+ { role: 'user', content: 'This is a small message that fits within the limit' },
+ ]);
+ const truncatedSpan = container.items.find(span =>
+ span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]?.value?.match(
+ /^\[\{"role":"user","content":"C+"\}\]$/,
+ ),
+ );
+ expect(truncatedSpan).toBeDefined();
+ expect(truncatedSpan!.name).toBe('chat claude-3-haiku-20240307');
+ expect(truncatedSpan!.status).toBe('ok');
+ expect(truncatedSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('chat');
+ expect(truncatedSpan!.attributes['sentry.op'].value).toBe('gen_ai.chat');
+ expect(truncatedSpan!.attributes['sentry.origin'].value).toBe('auto.ai.anthropic');
+ expect(truncatedSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('anthropic');
+ expect(truncatedSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('claude-3-haiku-20240307');
+ expect(truncatedSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE].value).toBe(3);
+
+ const smallMessageSpan = container.items.find(
+ span => span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]?.value === smallMsgValue,
+ );
+ expect(smallMessageSpan).toBeDefined();
+ expect(smallMessageSpan!.name).toBe('chat claude-3-haiku-20240307');
+ expect(smallMessageSpan!.status).toBe('ok');
+ expect(smallMessageSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('chat');
+ expect(smallMessageSpan!.attributes['sentry.op'].value).toBe('gen_ai.chat');
+ expect(smallMessageSpan!.attributes['sentry.origin'].value).toBe('auto.ai.anthropic');
+ expect(smallMessageSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('anthropic');
+ expect(smallMessageSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe(
+ 'claude-3-haiku-20240307',
+ );
+ expect(smallMessageSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE].value).toBe(3);
},
})
.start()
@@ -731,43 +660,43 @@ describe('Anthropic integration', () => {
createEsmAndCjsTests(__dirname, 'scenario-media-truncation.mjs', 'instrument-with-pii.mjs', (createRunner, test) => {
test('truncates media attachment, keeping all other details', async () => {
+ const expectedMediaMessages = JSON.stringify([
+ {
+ role: 'user',
+ content: [
+ {
+ type: 'image',
+ source: {
+ type: 'base64',
+ media_type: 'image/png',
+ data: '[Blob substitute]',
+ },
+ },
+ ],
+ },
+ ]);
await createRunner()
.ignore('event')
.expect({
transaction: {
transaction: 'main',
- spans: expect.arrayContaining([
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.anthropic',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-haiku-20240307',
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 2,
- // Only the last message (with filtered media) should be kept
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: JSON.stringify([
- {
- role: 'user',
- content: [
- {
- type: 'image',
- source: {
- type: 'base64',
- media_type: 'image/png',
- data: '[Blob substitute]',
- },
- },
- ],
- },
- ]),
- }),
- description: 'chat claude-3-haiku-20240307',
- op: 'gen_ai.chat',
- origin: 'auto.ai.anthropic',
- status: 'ok',
- }),
- ]),
+ },
+ })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(1);
+ const [firstSpan] = container.items;
+
+ // [0] messages.create with media attachment — image data replaced, other fields preserved
+ expect(firstSpan!.name).toBe('chat claude-3-haiku-20240307');
+ expect(firstSpan!.status).toBe('ok');
+ expect(firstSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE].value).toBe(expectedMediaMessages);
+ expect(firstSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('chat');
+ expect(firstSpan!.attributes['sentry.op'].value).toBe('gen_ai.chat');
+ expect(firstSpan!.attributes['sentry.origin'].value).toBe('auto.ai.anthropic');
+ expect(firstSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('anthropic');
+ expect(firstSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('claude-3-haiku-20240307');
+ expect(firstSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE].value).toBe(2);
},
})
.start()
@@ -781,20 +710,21 @@ describe('Anthropic integration', () => {
'instrument-with-pii.mjs',
(createRunner, test) => {
test('extracts system instructions from messages', async () => {
+ const expectedInstructions = JSON.stringify([{ type: 'text', content: 'You are a helpful assistant' }]);
await createRunner()
.ignore('event')
.expect({
transaction: {
transaction: 'main',
- spans: expect.arrayContaining([
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE]: JSON.stringify([
- { type: 'text', content: 'You are a helpful assistant' },
- ]),
- }),
- }),
- ]),
+ },
+ })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(1);
+ const [firstSpan] = container.items;
+
+ // [0] messages.create — system instructions extracted into dedicated attribute
+ expect(firstSpan!.attributes[GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE].value).toBe(expectedInstructions);
},
})
.start()
@@ -808,26 +738,6 @@ describe('Anthropic integration', () => {
const EXPECTED_TRANSACTION_NO_TRUNCATION = {
transaction: 'main',
- spans: expect.arrayContaining([
- // Multiple messages should all be preserved (no popping to last message only)
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: JSON.stringify([
- { role: 'user', content: longContent },
- { role: 'assistant', content: 'Some reply' },
- { role: 'user', content: 'Follow-up question' },
- ]),
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 3,
- }),
- }),
- // Long string input should not be truncated (messagesFromParams wraps it in an array)
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: JSON.stringify([longStringInput]),
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- }),
- }),
- ]),
};
createEsmAndCjsTests(
@@ -836,9 +746,31 @@ describe('Anthropic integration', () => {
'instrument-no-truncation.mjs',
(createRunner, test) => {
test('does not truncate input messages when enableTruncation is false', async () => {
+ const expectedAllMessages = JSON.stringify([
+ { role: 'user', content: longContent },
+ { role: 'assistant', content: 'Some reply' },
+ { role: 'user', content: 'Follow-up question' },
+ ]);
+ const expectedLongString = JSON.stringify([longStringInput]);
await createRunner()
.ignore('event')
.expect({ transaction: EXPECTED_TRANSACTION_NO_TRUNCATION })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(2);
+ const conversationSpan = container.items.find(
+ span => span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]?.value === expectedAllMessages,
+ );
+ expect(conversationSpan).toBeDefined();
+ expect(conversationSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE].value).toBe(3);
+
+ const longStringSpan = container.items.find(
+ span => span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]?.value === expectedLongString,
+ );
+ expect(longStringSpan).toBeDefined();
+ expect(longStringSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE].value).toBe(1);
+ },
+ })
.start()
.completed();
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/google-genai/instrument-no-truncation.mjs b/dev-packages/node-integration-tests/suites/tracing/google-genai/instrument-no-truncation.mjs
index be5288b429d6..164490759932 100644
--- a/dev-packages/node-integration-tests/suites/tracing/google-genai/instrument-no-truncation.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/google-genai/instrument-no-truncation.mjs
@@ -14,4 +14,5 @@ Sentry.init({
enableTruncation: false,
}),
],
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/google-genai/instrument-streaming-with-truncation.mjs b/dev-packages/node-integration-tests/suites/tracing/google-genai/instrument-streaming-with-truncation.mjs
index e706163aea04..8238ba744fcb 100644
--- a/dev-packages/node-integration-tests/suites/tracing/google-genai/instrument-streaming-with-truncation.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/google-genai/instrument-streaming-with-truncation.mjs
@@ -13,4 +13,5 @@ Sentry.init({
enableTruncation: true,
}),
],
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/google-genai/instrument-streaming.mjs b/dev-packages/node-integration-tests/suites/tracing/google-genai/instrument-streaming.mjs
index 48a860c510c5..7db8f66f7c58 100644
--- a/dev-packages/node-integration-tests/suites/tracing/google-genai/instrument-streaming.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/google-genai/instrument-streaming.mjs
@@ -8,4 +8,5 @@ Sentry.init({
sendDefaultPii: true,
transport: loggingTransport,
traceLifecycle: 'stream',
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/google-genai/instrument-with-options.mjs b/dev-packages/node-integration-tests/suites/tracing/google-genai/instrument-with-options.mjs
index 9823f5680be3..03cef86fecbd 100644
--- a/dev-packages/node-integration-tests/suites/tracing/google-genai/instrument-with-options.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/google-genai/instrument-with-options.mjs
@@ -20,4 +20,5 @@ Sentry.init({
}
return event;
},
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/google-genai/instrument-with-pii.mjs b/dev-packages/node-integration-tests/suites/tracing/google-genai/instrument-with-pii.mjs
index fa0a1136283d..7b17df2fce7f 100644
--- a/dev-packages/node-integration-tests/suites/tracing/google-genai/instrument-with-pii.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/google-genai/instrument-with-pii.mjs
@@ -14,4 +14,5 @@ Sentry.init({
}
return event;
},
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/google-genai/instrument.mjs b/dev-packages/node-integration-tests/suites/tracing/google-genai/instrument.mjs
index 9bcfb96ac103..6c57f1abed6b 100644
--- a/dev-packages/node-integration-tests/suites/tracing/google-genai/instrument.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/google-genai/instrument.mjs
@@ -14,4 +14,5 @@ Sentry.init({
}
return event;
},
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts b/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts
index 9839ef5fa2c0..f1e9cd333eee 100644
--- a/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts
@@ -1,10 +1,9 @@
-import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
import { afterAll, describe, expect } from 'vitest';
import {
GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE,
GEN_AI_INPUT_MESSAGES_ATTRIBUTE,
- GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE,
GEN_AI_OPERATION_NAME_ATTRIBUTE,
+ GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE,
GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE,
GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE,
GEN_AI_REQUEST_MODEL_ATTRIBUTE,
@@ -29,144 +28,55 @@ describe('Google GenAI integration', () => {
cleanupChildProcesses();
});
- const EXPECTED_TRANSACTION_DEFAULT_PII_FALSE = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // chat.sendMessage (should get model from context)
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro', // Should get from chat context
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
- },
- description: 'chat gemini-1.5-pro',
- op: 'gen_ai.chat',
- origin: 'auto.ai.google_genai',
- status: 'ok',
- }),
- // models.generateContent
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash',
- [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
- [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9,
- [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
- },
- description: 'generate_content gemini-1.5-flash',
- op: 'gen_ai.generate_content',
- origin: 'auto.ai.google_genai',
- status: 'ok',
- }),
- // error handling
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
- },
- description: 'generate_content error-model',
- op: 'gen_ai.generate_content',
- origin: 'auto.ai.google_genai',
- status: 'internal_error',
- }),
- ]),
- };
-
- const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // chat.sendMessage with PII
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include message when recordInputs: true
- [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response when recordOutputs: true
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
- }),
- description: 'chat gemini-1.5-pro',
- op: 'gen_ai.chat',
- origin: 'auto.ai.google_genai',
- status: 'ok',
- }),
- // models.generateContent with PII
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash',
- [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
- [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9,
- [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true
- [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response when recordOutputs: true
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
- }),
- description: 'generate_content gemini-1.5-flash',
- op: 'gen_ai.generate_content',
- origin: 'auto.ai.google_genai',
- status: 'ok',
- }),
- // error handling with PII
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true
- }),
- description: 'generate_content error-model',
- op: 'gen_ai.generate_content',
- origin: 'auto.ai.google_genai',
- status: 'internal_error',
- }),
- ]),
- };
-
- const EXPECTED_TRANSACTION_WITH_OPTIONS = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // Check that custom options are respected
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true
- [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text when recordOutputs: true
- }),
- description: expect.not.stringContaining('stream-response'), // Non-streaming span
- }),
- ]),
- };
-
createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument.mjs', (createRunner, test) => {
test('creates google genai related spans with sendDefaultPii: false', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_FALSE })
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(3);
+ expect(container.items.map(span => span.name).sort()).toEqual([
+ 'chat gemini-1.5-pro',
+ 'generate_content error-model',
+ 'generate_content gemini-1.5-flash',
+ ]);
+
+ const chatSpan = container.items.find(span => span.name === 'chat gemini-1.5-pro');
+ expect(chatSpan).toBeDefined();
+ expect(chatSpan!.status).toBe('ok');
+ expect(chatSpan!.attributes['sentry.op'].value).toBe('gen_ai.chat');
+ expect(chatSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('chat');
+ expect(chatSpan!.attributes['sentry.origin'].value).toBe('auto.ai.google_genai');
+ expect(chatSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('google_genai');
+ expect(chatSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('gemini-1.5-pro');
+ expect(chatSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(8);
+ expect(chatSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(12);
+ expect(chatSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE].value).toBe(20);
+
+ const generateContentSpan = container.items.find(span => span.name === 'generate_content gemini-1.5-flash');
+ expect(generateContentSpan).toBeDefined();
+ expect(generateContentSpan!.status).toBe('ok');
+ expect(generateContentSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
+ expect(generateContentSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('generate_content');
+ expect(generateContentSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('google_genai');
+ expect(generateContentSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('gemini-1.5-flash');
+ expect(generateContentSpan!.attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE].value).toBe(0.7);
+ expect(generateContentSpan!.attributes[GEN_AI_REQUEST_TOP_P_ATTRIBUTE].value).toBe(0.9);
+ expect(generateContentSpan!.attributes[GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE].value).toBe(100);
+ expect(generateContentSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(8);
+ expect(generateContentSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(12);
+ expect(generateContentSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE].value).toBe(20);
+
+ const errorSpan = container.items.find(span => span.name === 'generate_content error-model');
+ expect(errorSpan).toBeDefined();
+ expect(errorSpan!.status).toBe('error');
+ expect(errorSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
+ expect(errorSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('generate_content');
+ expect(errorSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('google_genai');
+ expect(errorSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('error-model');
+ },
+ })
.start()
.completed();
});
@@ -176,7 +86,48 @@ describe('Google GenAI integration', () => {
test('creates google genai related spans with sendDefaultPii: true', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_TRUE })
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(3);
+ expect(container.items.map(span => span.name).sort()).toEqual([
+ 'chat gemini-1.5-pro',
+ 'generate_content error-model',
+ 'generate_content gemini-1.5-flash',
+ ]);
+
+ const chatSpan = container.items.find(span => span.name === 'chat gemini-1.5-pro');
+ expect(chatSpan).toBeDefined();
+ expect(chatSpan!.status).toBe('ok');
+ expect(chatSpan!.attributes['sentry.op'].value).toBe('gen_ai.chat');
+ expect(chatSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('chat');
+ expect(chatSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('google_genai');
+ expect(chatSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('gemini-1.5-pro');
+ expect(chatSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toBeDefined();
+ expect(chatSpan!.attributes[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]).toBeDefined();
+ expect(chatSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(8);
+ expect(chatSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(12);
+ expect(chatSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE].value).toBe(20);
+
+ const generateContentSpan = container.items.find(span => span.name === 'generate_content gemini-1.5-flash');
+ expect(generateContentSpan).toBeDefined();
+ expect(generateContentSpan!.status).toBe('ok');
+ expect(generateContentSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
+ expect(generateContentSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('generate_content');
+ expect(generateContentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toBeDefined();
+ expect(generateContentSpan!.attributes[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]).toBeDefined();
+ expect(generateContentSpan!.attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE].value).toBe(0.7);
+ expect(generateContentSpan!.attributes[GEN_AI_REQUEST_TOP_P_ATTRIBUTE].value).toBe(0.9);
+ expect(generateContentSpan!.attributes[GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE].value).toBe(100);
+
+ const errorSpan = container.items.find(span => span.name === 'generate_content error-model');
+ expect(errorSpan).toBeDefined();
+ expect(errorSpan!.status).toBe('error');
+ expect(errorSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
+ expect(errorSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('generate_content');
+ expect(errorSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toBeDefined();
+ },
+ })
.start()
.completed();
});
@@ -186,7 +137,31 @@ describe('Google GenAI integration', () => {
test('creates google genai related spans with custom options', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_WITH_OPTIONS })
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(3);
+ expect(container.items.map(span => span.name).sort()).toEqual([
+ 'chat gemini-1.5-pro',
+ 'generate_content error-model',
+ 'generate_content gemini-1.5-flash',
+ ]);
+
+ const chatSpan = container.items.find(span => span.name === 'chat gemini-1.5-pro');
+ expect(chatSpan).toBeDefined();
+ expect(chatSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('chat');
+ expect(chatSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toBeDefined();
+ expect(chatSpan!.attributes[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]).toBeDefined();
+
+ const generateContentSpan = container.items.find(span => span.name === 'generate_content gemini-1.5-flash');
+ expect(generateContentSpan).toBeDefined();
+ expect(generateContentSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('generate_content');
+
+ const errorSpan = container.items.find(span => span.name === 'generate_content error-model');
+ expect(errorSpan).toBeDefined();
+ expect(errorSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('generate_content');
+ },
+ })
.start()
.completed();
});
@@ -195,242 +170,122 @@ describe('Google GenAI integration', () => {
const EXPECTED_AVAILABLE_TOOLS_JSON =
'[{"name":"controlLight","parametersJsonSchema":{"type":"object","properties":{"brightness":{"type":"number"},"colorTemperature":{"type":"string"}},"required":["brightness","colorTemperature"]}}]';
- const EXPECTED_TRANSACTION_TOOLS = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // Non-streaming with tools
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-2.0-flash-001',
- [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: EXPECTED_AVAILABLE_TOOLS_JSON,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents
- [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text
- [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), // Should include tool calls
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 8,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 23,
- }),
- description: 'generate_content gemini-2.0-flash-001',
- op: 'gen_ai.generate_content',
- origin: 'auto.ai.google_genai',
- status: 'ok',
- }),
- // Streaming with tools
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-2.0-flash-001',
- [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: EXPECTED_AVAILABLE_TOOLS_JSON,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents
- [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
- [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text
- [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String), // Should include tool calls
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'mock-response-tools-id',
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gemini-2.0-flash-001',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 12,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 22,
- }),
- description: 'generate_content gemini-2.0-flash-001',
- op: 'gen_ai.generate_content',
- origin: 'auto.ai.google_genai',
- status: 'ok',
- }),
- // Without tools for comparison
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-2.0-flash-001',
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents
- [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
- }),
- description: 'generate_content gemini-2.0-flash-001',
- op: 'gen_ai.generate_content',
- origin: 'auto.ai.google_genai',
- status: 'ok',
- }),
- ]),
- };
-
createEsmAndCjsTests(__dirname, 'scenario-tools.mjs', 'instrument-with-options.mjs', (createRunner, test) => {
test('creates google genai related spans with tool calls', async () => {
- await createRunner().ignore('event').expect({ transaction: EXPECTED_TRANSACTION_TOOLS }).start().completed();
+ await createRunner()
+ .ignore('event')
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(3);
+ const nonStreamingToolsSpan = container.items.find(
+ span =>
+ span.attributes[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]?.value === EXPECTED_AVAILABLE_TOOLS_JSON &&
+ span.attributes[GEN_AI_RESPONSE_STREAMING_ATTRIBUTE] === undefined,
+ );
+ expect(nonStreamingToolsSpan).toBeDefined();
+ expect(nonStreamingToolsSpan!.name).toBe('generate_content gemini-2.0-flash-001');
+ expect(nonStreamingToolsSpan!.status).toBe('ok');
+ expect(nonStreamingToolsSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('generate_content');
+ expect(nonStreamingToolsSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toBeDefined();
+ expect(nonStreamingToolsSpan!.attributes[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]).toBeDefined();
+ expect(nonStreamingToolsSpan!.attributes[GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]).toBeDefined();
+ expect(nonStreamingToolsSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(15);
+ expect(nonStreamingToolsSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(8);
+ expect(nonStreamingToolsSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE].value).toBe(23);
+
+ const streamingToolsSpan = container.items.find(
+ span =>
+ span.attributes[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]?.value === EXPECTED_AVAILABLE_TOOLS_JSON &&
+ span.attributes[GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]?.value === true,
+ );
+ expect(streamingToolsSpan).toBeDefined();
+ expect(streamingToolsSpan!.name).toBe('generate_content gemini-2.0-flash-001');
+ expect(streamingToolsSpan!.status).toBe('ok');
+ expect(streamingToolsSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('generate_content');
+ expect(streamingToolsSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toBeDefined();
+ expect(streamingToolsSpan!.attributes[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]).toBeDefined();
+ expect(streamingToolsSpan!.attributes[GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]).toBeDefined();
+ expect(streamingToolsSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE].value).toBe('mock-response-tools-id');
+ expect(streamingToolsSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE].value).toBe('gemini-2.0-flash-001');
+ expect(streamingToolsSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(12);
+ expect(streamingToolsSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(10);
+ expect(streamingToolsSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE].value).toBe(22);
+
+ const noToolsSpan = container.items.find(
+ span => span.attributes[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE] === undefined,
+ );
+ expect(noToolsSpan).toBeDefined();
+ expect(noToolsSpan!.name).toBe('generate_content gemini-2.0-flash-001');
+ expect(noToolsSpan!.status).toBe('ok');
+ expect(noToolsSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('generate_content');
+ expect(noToolsSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toBeDefined();
+ expect(noToolsSpan!.attributes[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]).toBeDefined();
+ expect(noToolsSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(8);
+ expect(noToolsSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(12);
+ expect(noToolsSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE].value).toBe(20);
+ },
+ })
+ .start()
+ .completed();
});
});
- const EXPECTED_TRANSACTION_STREAMING = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // models.generateContentStream (streaming)
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash',
- [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
- [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9,
- [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
- [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'mock-response-streaming-id',
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["STOP"]',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 22,
- }),
- description: 'generate_content gemini-1.5-flash',
- op: 'gen_ai.generate_content',
- origin: 'auto.ai.google_genai',
- status: 'ok',
- }),
- // chat.sendMessageStream (streaming)
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
- [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'mock-response-streaming-id',
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
- }),
- description: 'chat gemini-1.5-pro',
- op: 'gen_ai.chat',
- origin: 'auto.ai.google_genai',
- status: 'ok',
- }),
- // blocked content streaming
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
- }),
- description: 'generate_content blocked-model',
- op: 'gen_ai.generate_content',
- origin: 'auto.ai.google_genai',
- status: 'internal_error',
- }),
- // error handling for streaming
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
- }),
- description: 'generate_content error-model',
- op: 'gen_ai.generate_content',
- origin: 'auto.ai.google_genai',
- status: 'internal_error',
- }),
- ]),
- };
-
- const EXPECTED_TRANSACTION_STREAMING_PII_TRUE = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // models.generateContentStream (streaming) with PII
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash',
- [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
- [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.9,
- [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true
- [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'mock-response-streaming-id',
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["STOP"]',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 22,
- }),
- description: 'generate_content gemini-1.5-flash',
- op: 'gen_ai.generate_content',
- origin: 'auto.ai.google_genai',
- status: 'ok',
- }),
- // chat.sendMessageStream (streaming) with PII
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include message when recordInputs: true
- [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'mock-response-streaming-id',
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gemini-1.5-pro',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["STOP"]',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 22,
- }),
- description: 'chat gemini-1.5-pro',
- op: 'gen_ai.chat',
- origin: 'auto.ai.google_genai',
- status: 'ok',
- }),
- // blocked content stream with PII
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'blocked-model',
- [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true
- [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
- }),
- description: 'generate_content blocked-model',
- op: 'gen_ai.generate_content',
- origin: 'auto.ai.google_genai',
- status: 'internal_error',
- }),
- // error handling for streaming with PII
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
- [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include contents when recordInputs: true
- }),
- description: 'generate_content error-model',
- op: 'gen_ai.generate_content',
- origin: 'auto.ai.google_genai',
- status: 'internal_error',
- }),
- ]),
- };
-
createEsmAndCjsTests(__dirname, 'scenario-streaming.mjs', 'instrument.mjs', (createRunner, test) => {
test('creates google genai streaming spans with sendDefaultPii: false', async () => {
- await createRunner().ignore('event').expect({ transaction: EXPECTED_TRANSACTION_STREAMING }).start().completed();
+ await createRunner()
+ .ignore('event')
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(4);
+ expect(container.items.map(span => span.name).sort()).toEqual([
+ 'chat gemini-1.5-pro',
+ 'generate_content blocked-model',
+ 'generate_content error-model',
+ 'generate_content gemini-1.5-flash',
+ ]);
+
+ const generateContentSpan = container.items.find(span => span.name === 'generate_content gemini-1.5-flash');
+ expect(generateContentSpan).toBeDefined();
+ expect(generateContentSpan!.status).toBe('ok');
+ expect(generateContentSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('generate_content');
+ expect(generateContentSpan!.attributes[GEN_AI_RESPONSE_STREAMING_ATTRIBUTE].value).toBe(true);
+ expect(generateContentSpan!.attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE].value).toBe(0.7);
+ expect(generateContentSpan!.attributes[GEN_AI_REQUEST_TOP_P_ATTRIBUTE].value).toBe(0.9);
+ expect(generateContentSpan!.attributes[GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE].value).toBe(100);
+ expect(generateContentSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE].value).toBe(
+ 'mock-response-streaming-id',
+ );
+ expect(generateContentSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE].value).toBe('gemini-1.5-pro');
+ expect(generateContentSpan!.attributes[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE].value).toBe('["STOP"]');
+ expect(generateContentSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(10);
+ expect(generateContentSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(12);
+ expect(generateContentSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE].value).toBe(22);
+
+ const chatSpan = container.items.find(span => span.name === 'chat gemini-1.5-pro');
+ expect(chatSpan).toBeDefined();
+ expect(chatSpan!.status).toBe('ok');
+ expect(chatSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('chat');
+ expect(chatSpan!.attributes[GEN_AI_RESPONSE_STREAMING_ATTRIBUTE].value).toBe(true);
+ expect(chatSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE].value).toBe('mock-response-streaming-id');
+ expect(chatSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE].value).toBe('gemini-1.5-pro');
+
+ const blockedSpan = container.items.find(span => span.name === 'generate_content blocked-model');
+ expect(blockedSpan).toBeDefined();
+ expect(blockedSpan!.status).toBe('error');
+ expect(blockedSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
+ expect(blockedSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('generate_content');
+
+ const errorSpan = container.items.find(span => span.name === 'generate_content error-model');
+ expect(errorSpan).toBeDefined();
+ expect(errorSpan!.status).toBe('error');
+ expect(errorSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
+ expect(errorSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('generate_content');
+ },
+ })
+ .start()
+ .completed();
});
});
@@ -438,7 +293,52 @@ describe('Google GenAI integration', () => {
test('creates google genai streaming spans with sendDefaultPii: true', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_STREAMING_PII_TRUE })
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(4);
+ expect(container.items.map(span => span.name).sort()).toEqual([
+ 'chat gemini-1.5-pro',
+ 'generate_content blocked-model',
+ 'generate_content error-model',
+ 'generate_content gemini-1.5-flash',
+ ]);
+
+ const generateContentSpan = container.items.find(span => span.name === 'generate_content gemini-1.5-flash');
+ expect(generateContentSpan).toBeDefined();
+ expect(generateContentSpan!.status).toBe('ok');
+ expect(generateContentSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('generate_content');
+ expect(generateContentSpan!.attributes[GEN_AI_RESPONSE_STREAMING_ATTRIBUTE].value).toBe(true);
+ expect(generateContentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toBeDefined();
+ expect(generateContentSpan!.attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE].value).toBe(0.7);
+ expect(generateContentSpan!.attributes[GEN_AI_REQUEST_TOP_P_ATTRIBUTE].value).toBe(0.9);
+ expect(generateContentSpan!.attributes[GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE].value).toBe(100);
+ expect(generateContentSpan!.attributes[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE].value).toBe('["STOP"]');
+
+ const chatSpan = container.items.find(span => span.name === 'chat gemini-1.5-pro');
+ expect(chatSpan).toBeDefined();
+ expect(chatSpan!.status).toBe('ok');
+ expect(chatSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('chat');
+ expect(chatSpan!.attributes[GEN_AI_RESPONSE_STREAMING_ATTRIBUTE].value).toBe(true);
+ expect(chatSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toBeDefined();
+ expect(chatSpan!.attributes[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE].value).toBe('["STOP"]');
+
+ const blockedSpan = container.items.find(span => span.name === 'generate_content blocked-model');
+ expect(blockedSpan).toBeDefined();
+ expect(blockedSpan!.status).toBe('error');
+ expect(blockedSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('generate_content');
+ expect(blockedSpan!.attributes[GEN_AI_RESPONSE_STREAMING_ATTRIBUTE].value).toBe(true);
+ expect(blockedSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toBeDefined();
+ expect(blockedSpan!.attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE].value).toBe(0.7);
+
+ const errorSpan = container.items.find(span => span.name === 'generate_content error-model');
+ expect(errorSpan).toBeDefined();
+ expect(errorSpan!.status).toBe('error');
+ expect(errorSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('generate_content');
+ expect(errorSpan!.attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE].value).toBe(0.7);
+ expect(errorSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toBeDefined();
+ },
+ })
.start()
.completed();
});
@@ -452,52 +352,36 @@ describe('Google GenAI integration', () => {
test('truncates messages when they exceed byte limit - keeps only last message and crops it', async () => {
await createRunner()
.ignore('event')
+ .expect({ transaction: { transaction: 'main' } })
.expect({
- transaction: {
- transaction: 'main',
- spans: expect.arrayContaining([
- // First call: Last message is large and gets truncated (only C's remain, D's are cropped)
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash',
- // Messages should be present (truncation happened) and should be a JSON array with parts
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringMatching(
- /^\[\{"role":"user","parts":\[\{"text":"C+"\}\]\}\]$/,
- ),
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 3,
- }),
- description: 'generate_content gemini-1.5-flash',
- op: 'gen_ai.generate_content',
- origin: 'auto.ai.google_genai',
- status: 'ok',
- }),
- // Second call: Last message is small and kept without truncation
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gemini-1.5-flash',
- // Small message should be kept intact
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: JSON.stringify([
- {
- role: 'user',
- parts: [{ text: 'This is a small message that fits within the limit' }],
- },
- ]),
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 3,
- }),
- description: 'generate_content gemini-1.5-flash',
- op: 'gen_ai.generate_content',
- origin: 'auto.ai.google_genai',
- status: 'ok',
- }),
- ]),
+ span: container => {
+ expect(container.items).toHaveLength(2);
+ const truncatedSpan = container.items.find(span =>
+ span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]?.value?.match(
+ /^\[\{"role":"user","parts":\[\{"text":"C+"\}\]\}\]$/,
+ ),
+ );
+ expect(truncatedSpan).toBeDefined();
+ expect(truncatedSpan!.name).toBe('generate_content gemini-1.5-flash');
+ expect(truncatedSpan!.status).toBe('ok');
+ expect(truncatedSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('generate_content');
+ expect(truncatedSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE].value).toBe(3);
+
+ const smallMessageSpan = container.items.find(
+ span =>
+ span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]?.value ===
+ JSON.stringify([
+ {
+ role: 'user',
+ parts: [{ text: 'This is a small message that fits within the limit' }],
+ },
+ ]),
+ );
+ expect(smallMessageSpan).toBeDefined();
+ expect(smallMessageSpan!.name).toBe('generate_content gemini-1.5-flash');
+ expect(smallMessageSpan!.status).toBe('ok');
+ expect(smallMessageSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('generate_content');
+ expect(smallMessageSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE].value).toBe(3);
},
})
.start()
@@ -514,18 +398,18 @@ describe('Google GenAI integration', () => {
test('extracts system instructions from messages', async () => {
await createRunner()
.ignore('event')
+ .expect({ transaction: { transaction: 'main' } })
.expect({
- transaction: {
- transaction: 'main',
- spans: expect.arrayContaining([
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE]: JSON.stringify([
- { type: 'text', content: 'You are a helpful assistant' },
- ]),
- }),
- }),
- ]),
+ span: container => {
+ expect(container.items).toHaveLength(1);
+ const [firstSpan] = container.items;
+
+ // [0] generate_content with system instructions extracted
+ expect(firstSpan!.name).toBe('generate_content gemini-1.5-flash');
+ expect(firstSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('generate_content');
+ expect(firstSpan!.attributes[GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE].value).toBe(
+ JSON.stringify([{ type: 'text', content: 'You are a helpful assistant' }]),
+ );
},
})
.start()
@@ -534,111 +418,40 @@ describe('Google GenAI integration', () => {
},
);
- const EXPECTED_TRANSACTION_DEFAULT_PII_FALSE_EMBEDDINGS = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // First span - embedContent with string contents
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'text-embedding-004',
- },
- description: 'embeddings text-embedding-004',
- op: 'gen_ai.embeddings',
- origin: 'auto.ai.google_genai',
- status: 'ok',
- }),
- // Second span - embedContent error model
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
- },
- description: 'embeddings error-model',
- op: 'gen_ai.embeddings',
- origin: 'auto.ai.google_genai',
- status: 'internal_error',
- }),
- // Third span - embedContent with array contents
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'text-embedding-004',
- },
- description: 'embeddings text-embedding-004',
- op: 'gen_ai.embeddings',
- origin: 'auto.ai.google_genai',
- status: 'ok',
- }),
- ]),
- };
-
- const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE_EMBEDDINGS = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // First span - embedContent with PII
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'text-embedding-004',
- [GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]: 'What is the capital of France?',
- },
- description: 'embeddings text-embedding-004',
- op: 'gen_ai.embeddings',
- origin: 'auto.ai.google_genai',
- status: 'ok',
- }),
- // Second span - embedContent error model with PII
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
- [GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]: 'This will fail',
- },
- description: 'embeddings error-model',
- op: 'gen_ai.embeddings',
- origin: 'auto.ai.google_genai',
- status: 'internal_error',
- }),
- // Third span - embedContent with array contents and PII
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'google_genai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'text-embedding-004',
- [GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]:
- '[{"role":"user","parts":[{"text":"First input text"}]},{"role":"user","parts":[{"text":"Second input text"}]}]',
- },
- description: 'embeddings text-embedding-004',
- op: 'gen_ai.embeddings',
- origin: 'auto.ai.google_genai',
- status: 'ok',
- }),
- ]),
- };
-
createEsmAndCjsTests(__dirname, 'scenario-embeddings.mjs', 'instrument.mjs', (createRunner, test) => {
test('creates google genai embeddings spans with sendDefaultPii: false', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_FALSE_EMBEDDINGS })
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(3);
+ expect(container.items.map(span => span.name).sort()).toEqual([
+ 'embeddings error-model',
+ 'embeddings text-embedding-004',
+ 'embeddings text-embedding-004',
+ ]);
+
+ const successfulSpans = container.items.filter(
+ span => span.name === 'embeddings text-embedding-004' && span.status === 'ok',
+ );
+ expect(successfulSpans).toHaveLength(2);
+ for (const span of successfulSpans) {
+ expect(span.attributes['sentry.op'].value).toBe('gen_ai.embeddings');
+ expect(span.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('embeddings');
+ expect(span.attributes['sentry.origin'].value).toBe('auto.ai.google_genai');
+ expect(span.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('google_genai');
+ expect(span.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('text-embedding-004');
+ expect(span.attributes[GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]).toBeUndefined();
+ }
+
+ const errorSpan = container.items.find(span => span.name === 'embeddings error-model');
+ expect(errorSpan).toBeDefined();
+ expect(errorSpan!.status).toBe('error');
+ expect(errorSpan!.attributes['sentry.op'].value).toBe('gen_ai.embeddings');
+ expect(errorSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('embeddings');
+ },
+ })
.start()
.completed();
});
@@ -648,7 +461,44 @@ describe('Google GenAI integration', () => {
test('creates google genai embeddings spans with sendDefaultPii: true', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_TRUE_EMBEDDINGS })
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(3);
+ expect(container.items.map(span => span.name).sort()).toEqual([
+ 'embeddings error-model',
+ 'embeddings text-embedding-004',
+ 'embeddings text-embedding-004',
+ ]);
+
+ const stringInputSpan = container.items.find(
+ span => span.attributes[GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]?.value === 'What is the capital of France?',
+ );
+ expect(stringInputSpan).toBeDefined();
+ expect(stringInputSpan!.name).toBe('embeddings text-embedding-004');
+ expect(stringInputSpan!.status).toBe('ok');
+ expect(stringInputSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('embeddings');
+ expect(stringInputSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('google_genai');
+
+ const errorSpan = container.items.find(
+ span => span.attributes[GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]?.value === 'This will fail',
+ );
+ expect(errorSpan).toBeDefined();
+ expect(errorSpan!.name).toBe('embeddings error-model');
+ expect(errorSpan!.status).toBe('error');
+ expect(errorSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('embeddings');
+
+ const arrayInputSpan = container.items.find(
+ span =>
+ span.attributes[GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]?.value ===
+ '[{"role":"user","parts":[{"text":"First input text"}]},{"role":"user","parts":[{"text":"Second input text"}]}]',
+ );
+ expect(arrayInputSpan).toBeDefined();
+ expect(arrayInputSpan!.name).toBe('embeddings text-embedding-004');
+ expect(arrayInputSpan!.status).toBe('ok');
+ expect(arrayInputSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('embeddings');
+ },
+ })
.start()
.completed();
});
@@ -656,22 +506,6 @@ describe('Google GenAI integration', () => {
const longContent = 'A'.repeat(50_000);
- const EXPECTED_TRANSACTION_NO_TRUNCATION = {
- transaction: 'main',
- spans: expect.arrayContaining([
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: JSON.stringify([
- { role: 'user', parts: [{ text: longContent }] },
- { role: 'model', parts: [{ text: 'Some reply' }] },
- { role: 'user', parts: [{ text: 'Follow-up question' }] },
- ]),
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 3,
- }),
- }),
- ]),
- };
-
createEsmAndCjsTests(
__dirname,
'scenario-no-truncation.mjs',
@@ -680,7 +514,24 @@ describe('Google GenAI integration', () => {
test('does not truncate input messages when enableTruncation is false', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_NO_TRUNCATION })
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(1);
+ const [firstSpan] = container.items;
+
+ // [0] generate_content with full (non-truncated) input messages
+ expect(firstSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('generate_content');
+ expect(firstSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE].value).toBe(
+ JSON.stringify([
+ { role: 'user', parts: [{ text: longContent }] },
+ { role: 'model', parts: [{ text: 'Some reply' }] },
+ { role: 'user', parts: [{ text: 'Follow-up question' }] },
+ ]),
+ );
+ expect(firstSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE].value).toBe(3);
+ },
+ })
.start()
.completed();
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/langchain/instrument-no-truncation.mjs b/dev-packages/node-integration-tests/suites/tracing/langchain/instrument-no-truncation.mjs
index 027299eeacad..8d9a5b45f919 100644
--- a/dev-packages/node-integration-tests/suites/tracing/langchain/instrument-no-truncation.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/langchain/instrument-no-truncation.mjs
@@ -21,4 +21,5 @@ Sentry.init({
}
return event;
},
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/langchain/instrument-streaming-with-truncation.mjs b/dev-packages/node-integration-tests/suites/tracing/langchain/instrument-streaming-with-truncation.mjs
index cdfebbf845fc..081d0969e00b 100644
--- a/dev-packages/node-integration-tests/suites/tracing/langchain/instrument-streaming-with-truncation.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/langchain/instrument-streaming-with-truncation.mjs
@@ -13,4 +13,5 @@ Sentry.init({
enableTruncation: true,
}),
],
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/langchain/instrument-streaming.mjs b/dev-packages/node-integration-tests/suites/tracing/langchain/instrument-streaming.mjs
index 48a860c510c5..7db8f66f7c58 100644
--- a/dev-packages/node-integration-tests/suites/tracing/langchain/instrument-streaming.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/langchain/instrument-streaming.mjs
@@ -8,4 +8,5 @@ Sentry.init({
sendDefaultPii: true,
transport: loggingTransport,
traceLifecycle: 'stream',
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/langchain/instrument-with-pii.mjs b/dev-packages/node-integration-tests/suites/tracing/langchain/instrument-with-pii.mjs
index 84212d887ee7..2a82a27dbdb4 100644
--- a/dev-packages/node-integration-tests/suites/tracing/langchain/instrument-with-pii.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/langchain/instrument-with-pii.mjs
@@ -14,4 +14,5 @@ Sentry.init({
}
return event;
},
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/langchain/instrument.mjs b/dev-packages/node-integration-tests/suites/tracing/langchain/instrument.mjs
index 1fb023b535d4..1d36ee5c06e6 100644
--- a/dev-packages/node-integration-tests/suites/tracing/langchain/instrument.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/langchain/instrument.mjs
@@ -14,4 +14,5 @@ Sentry.init({
}
return event;
},
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts b/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts
index f85e3187ac78..5ba5a2b20b5f 100644
--- a/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/langchain/test.ts
@@ -1,8 +1,7 @@
-import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
import { afterAll, describe, expect } from 'vitest';
import {
- GEN_AI_EMBEDDINGS_OPERATION_ATTRIBUTE,
GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE,
+ GEN_AI_EMBEDDINGS_OPERATION_ATTRIBUTE,
GEN_AI_INPUT_MESSAGES_ATTRIBUTE,
GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE,
GEN_AI_OPERATION_NAME_ATTRIBUTE,
@@ -29,146 +28,60 @@ describe('LangChain integration', () => {
cleanupChildProcesses();
});
- const EXPECTED_TRANSACTION_DEFAULT_PII_FALSE = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // First span - chat model with claude-3-5-sonnet
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022',
- [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
- [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]: expect.any(String),
- }),
- description: 'chat claude-3-5-sonnet-20241022',
- op: 'gen_ai.chat',
- origin: 'auto.ai.langchain',
- status: 'ok',
- }),
- // Second span - chat model with claude-3-opus
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-opus-20240229',
- [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.9,
- [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.95,
- [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 200,
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]: expect.any(String),
- }),
- description: 'chat claude-3-opus-20240229',
- op: 'gen_ai.chat',
- origin: 'auto.ai.langchain',
- status: 'ok',
- }),
- // Third span - error handling
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
- }),
- description: 'chat error-model',
- op: 'gen_ai.chat',
- origin: 'auto.ai.langchain',
- status: 'internal_error',
- }),
- ]),
- };
-
- const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // First span - chat model with PII
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022',
- [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
- [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true
- [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response when recordOutputs: true
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]: expect.any(String),
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
- }),
- description: 'chat claude-3-5-sonnet-20241022',
- op: 'gen_ai.chat',
- origin: 'auto.ai.langchain',
- status: 'ok',
- }),
- // Second span - chat model with PII
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-opus-20240229',
- [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.9,
- [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.95,
- [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 200,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true
- [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response when recordOutputs: true
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]: expect.any(String),
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
- }),
- description: 'chat claude-3-opus-20240229',
- op: 'gen_ai.chat',
- origin: 'auto.ai.langchain',
- status: 'ok',
- }),
- // Third span - error handling with PII
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true
- }),
- description: 'chat error-model',
- op: 'gen_ai.chat',
- origin: 'auto.ai.langchain',
- status: 'internal_error',
- }),
- ]),
- };
-
createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument.mjs', (createRunner, test) => {
test('creates langchain related spans with sendDefaultPii: false', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_FALSE })
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(3);
+ expect(container.items.map(span => span.name).sort()).toEqual([
+ 'chat claude-3-5-sonnet-20241022',
+ 'chat claude-3-opus-20240229',
+ 'chat error-model',
+ ]);
+
+ const sonnetSpan = container.items.find(span => span.name === 'chat claude-3-5-sonnet-20241022');
+ expect(sonnetSpan).toBeDefined();
+ expect(sonnetSpan!.status).toBe('ok');
+ expect(sonnetSpan!.attributes['sentry.op'].value).toBe('gen_ai.chat');
+ expect(sonnetSpan!.attributes['sentry.origin'].value).toBe('auto.ai.langchain');
+ expect(sonnetSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('chat');
+ expect(sonnetSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('anthropic');
+ expect(sonnetSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('claude-3-5-sonnet-20241022');
+ expect(sonnetSpan!.attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE].value).toBe(0.7);
+ expect(sonnetSpan!.attributes[GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE].value).toBe(100);
+ expect(sonnetSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(10);
+ expect(sonnetSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(15);
+ expect(sonnetSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE].value).toBe(25);
+ expect(sonnetSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]).toBeDefined();
+ expect(sonnetSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toBeDefined();
+ expect(sonnetSpan!.attributes[GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]).toBeDefined();
+
+ const opusSpan = container.items.find(span => span.name === 'chat claude-3-opus-20240229');
+ expect(opusSpan).toBeDefined();
+ expect(opusSpan!.status).toBe('ok');
+ expect(opusSpan!.attributes['sentry.op'].value).toBe('gen_ai.chat');
+ expect(opusSpan!.attributes['sentry.origin'].value).toBe('auto.ai.langchain');
+ expect(opusSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('anthropic');
+ expect(opusSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('claude-3-opus-20240229');
+ expect(opusSpan!.attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE].value).toBe(0.9);
+ expect(opusSpan!.attributes[GEN_AI_REQUEST_TOP_P_ATTRIBUTE].value).toBe(0.95);
+ expect(opusSpan!.attributes[GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE].value).toBe(200);
+ expect(opusSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(10);
+ expect(opusSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(15);
+ expect(opusSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE].value).toBe(25);
+
+ const errorSpan = container.items.find(span => span.name === 'chat error-model');
+ expect(errorSpan).toBeDefined();
+ expect(errorSpan!.status).toBe('error');
+ expect(errorSpan!.attributes['sentry.op'].value).toBe('gen_ai.chat');
+ expect(errorSpan!.attributes['sentry.origin'].value).toBe('auto.ai.langchain');
+ expect(errorSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('anthropic');
+ expect(errorSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('error-model');
+ },
+ })
.start()
.completed();
});
@@ -176,14 +89,16 @@ describe('LangChain integration', () => {
test('does not create duplicate spans from double module patching', async () => {
await createRunner()
.ignore('event')
+ .expect({ transaction: { transaction: 'main' } })
.expect({
- transaction: event => {
- const spans = event.spans || [];
- const genAiChatSpans = spans.filter(span => span.op === 'gen_ai.chat');
+ span: container => {
// The scenario makes 3 LangChain calls (2 successful + 1 error).
// Without the dedup guard, the file-level and module-level hooks
// both patch the same prototype, producing 6 spans instead of 3.
- expect(genAiChatSpans).toHaveLength(3);
+ expect(container.items).toHaveLength(3);
+ for (const span of container.items) {
+ expect(span.attributes['sentry.op'].value).toBe('gen_ai.chat');
+ }
},
})
.start()
@@ -195,107 +110,92 @@ describe('LangChain integration', () => {
test('creates langchain related spans with sendDefaultPii: true', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_TRUE })
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(3);
+ expect(container.items.map(span => span.name).sort()).toEqual([
+ 'chat claude-3-5-sonnet-20241022',
+ 'chat claude-3-opus-20240229',
+ 'chat error-model',
+ ]);
+
+ const sonnetSpan = container.items.find(span => span.name === 'chat claude-3-5-sonnet-20241022');
+ expect(sonnetSpan).toBeDefined();
+ expect(sonnetSpan!.status).toBe('ok');
+ expect(sonnetSpan!.attributes['sentry.op'].value).toBe('gen_ai.chat');
+ expect(sonnetSpan!.attributes['sentry.origin'].value).toBe('auto.ai.langchain');
+ expect(sonnetSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('anthropic');
+ expect(sonnetSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('claude-3-5-sonnet-20241022');
+ expect(sonnetSpan!.attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE].value).toBe(0.7);
+ expect(sonnetSpan!.attributes[GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE].value).toBe(100);
+ expect(sonnetSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toBeDefined();
+ expect(sonnetSpan!.attributes[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]).toBeDefined();
+ expect(sonnetSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]).toBeDefined();
+ expect(sonnetSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toBeDefined();
+ expect(sonnetSpan!.attributes[GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]).toBeDefined();
+ expect(sonnetSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(10);
+ expect(sonnetSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(15);
+ expect(sonnetSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE].value).toBe(25);
+
+ const opusSpan = container.items.find(span => span.name === 'chat claude-3-opus-20240229');
+ expect(opusSpan).toBeDefined();
+ expect(opusSpan!.status).toBe('ok');
+ expect(opusSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('anthropic');
+ expect(opusSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('claude-3-opus-20240229');
+ expect(opusSpan!.attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE].value).toBe(0.9);
+ expect(opusSpan!.attributes[GEN_AI_REQUEST_TOP_P_ATTRIBUTE].value).toBe(0.95);
+ expect(opusSpan!.attributes[GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE].value).toBe(200);
+ expect(opusSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toBeDefined();
+ expect(opusSpan!.attributes[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]).toBeDefined();
+ expect(opusSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(10);
+ expect(opusSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(15);
+ expect(opusSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE].value).toBe(25);
+
+ const errorSpan = container.items.find(span => span.name === 'chat error-model');
+ expect(errorSpan).toBeDefined();
+ expect(errorSpan!.status).toBe('error');
+ expect(errorSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('anthropic');
+ expect(errorSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('error-model');
+ expect(errorSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toBeDefined();
+ },
+ })
.start()
.completed();
});
});
- const EXPECTED_TRANSACTION_TOOL_CALLS = {
- transaction: 'main',
- spans: expect.arrayContaining([
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022',
- [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
- [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 150,
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 20,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 30,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 50,
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]: 'tool_use',
- [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String),
- }),
- description: 'chat claude-3-5-sonnet-20241022',
- op: 'gen_ai.chat',
- origin: 'auto.ai.langchain',
- status: 'ok',
- }),
- ]),
- };
-
createEsmAndCjsTests(__dirname, 'scenario-tools.mjs', 'instrument.mjs', (createRunner, test) => {
test('creates langchain spans with tool calls', async () => {
- await createRunner().ignore('event').expect({ transaction: EXPECTED_TRANSACTION_TOOL_CALLS }).start().completed();
+ await createRunner()
+ .ignore('event')
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(1);
+ const [firstSpan] = container.items;
+
+ // [0] chat with tool_use stop reason
+ expect(firstSpan!.name).toBe('chat claude-3-5-sonnet-20241022');
+ expect(firstSpan!.status).toBe('ok');
+ expect(firstSpan!.attributes['sentry.op'].value).toBe('gen_ai.chat');
+ expect(firstSpan!.attributes['sentry.origin'].value).toBe('auto.ai.langchain');
+ expect(firstSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('anthropic');
+ expect(firstSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('claude-3-5-sonnet-20241022');
+ expect(firstSpan!.attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE].value).toBe(0.7);
+ expect(firstSpan!.attributes[GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE].value).toBe(150);
+ expect(firstSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(20);
+ expect(firstSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(30);
+ expect(firstSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE].value).toBe(50);
+ expect(firstSpan!.attributes[GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE].value).toBe('tool_use');
+ expect(firstSpan!.attributes[GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]).toBeDefined();
+ },
+ })
+ .start()
+ .completed();
});
});
- const EXPECTED_TRANSACTION_MESSAGE_TRUNCATION = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // First call: String input truncated (only C's remain, D's are cropped)
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022',
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- // Messages should be present and should include truncated string input (contains only Cs)
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/),
- }),
- description: 'chat claude-3-5-sonnet-20241022',
- op: 'gen_ai.chat',
- origin: 'auto.ai.langchain',
- status: 'ok',
- }),
- // Second call: Array input, last message truncated (only C's remain, D's are cropped)
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022',
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 2,
- [GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE]: expect.any(String),
- // Messages should be present (truncation happened) and should be a JSON array of a single index (contains only Cs)
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/),
- }),
- description: 'chat claude-3-5-sonnet-20241022',
- op: 'gen_ai.chat',
- origin: 'auto.ai.langchain',
- status: 'ok',
- }),
- // Third call: Last message is small and kept without truncation
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022',
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 2,
- [GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE]: expect.any(String),
- // Small message should be kept intact
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: JSON.stringify([
- { role: 'user', content: 'This is a small message that fits within the limit' },
- ]),
- }),
- description: 'chat claude-3-5-sonnet-20241022',
- op: 'gen_ai.chat',
- origin: 'auto.ai.langchain',
- status: 'ok',
- }),
- ]),
- };
-
createEsmAndCjsTests(
__dirname,
'scenario-message-truncation.mjs',
@@ -304,7 +204,41 @@ describe('LangChain integration', () => {
test('truncates messages when they exceed byte limit', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_MESSAGE_TRUNCATION })
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(3);
+ const stringInputSpan = container.items.find(
+ span => span.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]?.value === 1,
+ );
+ expect(stringInputSpan).toBeDefined();
+ expect(stringInputSpan!.name).toBe('chat claude-3-5-sonnet-20241022');
+ expect(stringInputSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE].value).toMatch(
+ /^\[\{"role":"user","content":"C+"\}\]$/,
+ );
+
+ const arrayInputSpan = container.items.find(
+ span =>
+ span.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]?.value === 2 &&
+ span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]?.value?.match(
+ /^\[\{"role":"user","content":"C+"\}\]$/,
+ ),
+ );
+ expect(arrayInputSpan).toBeDefined();
+ expect(arrayInputSpan!.name).toBe('chat claude-3-5-sonnet-20241022');
+ expect(arrayInputSpan!.attributes[GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE]).toBeDefined();
+
+ const smallMessageSpan = container.items.find(
+ span =>
+ span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]?.value ===
+ JSON.stringify([{ role: 'user', content: 'This is a small message that fits within the limit' }]),
+ );
+ expect(smallMessageSpan).toBeDefined();
+ expect(smallMessageSpan!.name).toBe('chat claude-3-5-sonnet-20241022');
+ expect(smallMessageSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE].value).toBe(2);
+ expect(smallMessageSpan!.attributes[GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE]).toBeDefined();
+ },
+ })
.start()
.completed();
});
@@ -319,46 +253,26 @@ describe('LangChain integration', () => {
test('demonstrates timing issue with duplicate spans (ESM only)', async () => {
await createRunner()
.ignore('event')
+ .expect({ transaction: { transaction: 'main' } })
.expect({
- transaction: event => {
- // This test highlights the limitation: if a user creates an Anthropic client
- // before importing LangChain, that client will still be instrumented and
- // could cause duplicate spans when used alongside LangChain.
-
- const spans = event.spans || [];
-
- // First call: Direct Anthropic call made BEFORE LangChain import
- // This should have Anthropic instrumentation (origin: 'auto.ai.anthropic')
- const firstAnthropicSpan = spans.find(
- span => span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic',
- );
-
- // Second call: LangChain call
- // This should have LangChain instrumentation (origin: 'auto.ai.langchain')
- const langchainSpan = spans.find(
- span => span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.langchain',
+ span: container => {
+ expect(container.items).toHaveLength(2);
+ const anthropicSpan = container.items.find(
+ span => span.attributes['sentry.origin'].value === 'auto.ai.anthropic',
);
+ expect(anthropicSpan).toBeDefined();
+ expect(anthropicSpan!.name).toBe('chat claude-3-5-sonnet-20241022');
- // Third call: Direct Anthropic call made AFTER LangChain import
- // This should NOT have Anthropic instrumentation (skip works correctly)
- // Count how many Anthropic spans we have - should be exactly 1
- const anthropicSpans = spans.filter(
- span => span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic',
+ // LangChain call is instrumented by LangChain.
+ const langchainSpan = container.items.find(
+ span => span.attributes['sentry.origin'].value === 'auto.ai.langchain',
);
-
- // Verify the edge case limitation:
- // - First Anthropic client (created before LangChain) IS instrumented
- expect(firstAnthropicSpan).toBeDefined();
- expect(firstAnthropicSpan?.origin).toBe('auto.ai.anthropic');
-
- // - LangChain call IS instrumented by LangChain
expect(langchainSpan).toBeDefined();
- expect(langchainSpan?.origin).toBe('auto.ai.langchain');
+ expect(langchainSpan!.name).toBe('chat claude-3-5-sonnet-20241022');
- // - Second Anthropic client (created after LangChain) is NOT instrumented
- // This demonstrates that the skip mechanism works for NEW clients
- // We should only have ONE Anthropic span (the first one), not two
- expect(anthropicSpans).toHaveLength(1);
+ // Third call (not present): Direct Anthropic call made AFTER LangChain import
+ // is NOT instrumented, which demonstrates the skip mechanism works for NEW
+ // clients. We should only have ONE Anthropic span (the first one), not two.
},
})
.start()
@@ -377,18 +291,18 @@ describe('LangChain integration', () => {
test('extracts system instructions from messages', async () => {
await createRunner()
.ignore('event')
+ .expect({ transaction: { transaction: 'main' } })
.expect({
- transaction: {
- transaction: 'main',
- spans: expect.arrayContaining([
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE]: JSON.stringify([
- { type: 'text', content: 'You are a helpful assistant' },
- ]),
- }),
- }),
- ]),
+ span: container => {
+ expect(container.items).toHaveLength(1);
+ const [firstSpan] = container.items;
+
+ // [0] chat with extracted system instructions
+ expect(firstSpan!.name).toBe('chat claude-3-5-sonnet-20241022');
+ expect(firstSpan!.attributes['sentry.op'].value).toBe('gen_ai.chat');
+ expect(firstSpan!.attributes[GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE].value).toBe(
+ JSON.stringify([{ type: 'text', content: 'You are a helpful assistant' }]),
+ );
},
})
.start()
@@ -401,32 +315,37 @@ describe('LangChain integration', () => {
test('uses runName for chain spans instead of unknown_chain', async () => {
await createRunner()
.ignore('event')
+ .expect({ transaction: { transaction: 'main' } })
.expect({
- transaction: {
- transaction: 'main',
- spans: expect.arrayContaining([
- expect.objectContaining({
- description: 'chain format_prompt',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.ai.langchain',
- data: expect.objectContaining({
- 'langchain.chain.name': 'format_prompt',
- }),
- }),
- expect.objectContaining({
- description: 'chain parse_output',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.ai.langchain',
- data: expect.objectContaining({
- 'langchain.chain.name': 'parse_output',
- }),
- }),
- expect.objectContaining({
- description: 'chat claude-3-5-sonnet-20241022',
- op: 'gen_ai.chat',
- origin: 'auto.ai.langchain',
- }),
- ]),
+ span: container => {
+ expect(container.items).toHaveLength(4);
+ expect(container.items.map(span => span.name).sort()).toEqual([
+ 'chain format_prompt',
+ 'chain parse_output',
+ 'chain unknown_chain',
+ 'chat claude-3-5-sonnet-20241022',
+ ]);
+
+ const formatPromptSpan = container.items.find(span => span.name === 'chain format_prompt');
+ expect(formatPromptSpan).toBeDefined();
+ expect(formatPromptSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+ expect(formatPromptSpan!.attributes['sentry.origin'].value).toBe('auto.ai.langchain');
+ expect(formatPromptSpan!.attributes['langchain.chain.name'].value).toBe('format_prompt');
+
+ const chatSpan = container.items.find(span => span.name === 'chat claude-3-5-sonnet-20241022');
+ expect(chatSpan).toBeDefined();
+ expect(chatSpan!.attributes['sentry.op'].value).toBe('gen_ai.chat');
+ expect(chatSpan!.attributes['sentry.origin'].value).toBe('auto.ai.langchain');
+
+ const parseOutputSpan = container.items.find(span => span.name === 'chain parse_output');
+ expect(parseOutputSpan).toBeDefined();
+ expect(parseOutputSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+ expect(parseOutputSpan!.attributes['sentry.origin'].value).toBe('auto.ai.langchain');
+ expect(parseOutputSpan!.attributes['langchain.chain.name'].value).toBe('parse_output');
+
+ const unknownChainSpan = container.items.find(span => span.name === 'chain unknown_chain');
+ expect(unknownChainSpan).toBeDefined();
+ expect(unknownChainSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
},
})
.start()
@@ -438,101 +357,55 @@ describe('LangChain integration', () => {
// Embeddings tests
// =========================================================================
- const EXPECTED_TRANSACTION_EMBEDDINGS = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // embedQuery span
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: GEN_AI_EMBEDDINGS_OPERATION_ATTRIBUTE,
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'text-embedding-3-small',
- [GEN_AI_REQUEST_DIMENSIONS_ATTRIBUTE]: 1536,
- }),
- description: 'embeddings text-embedding-3-small',
- op: GEN_AI_EMBEDDINGS_OPERATION_ATTRIBUTE,
- origin: 'auto.ai.langchain',
- status: 'ok',
- }),
- // embedDocuments span
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: GEN_AI_EMBEDDINGS_OPERATION_ATTRIBUTE,
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'text-embedding-3-small',
- }),
- description: 'embeddings text-embedding-3-small',
- op: GEN_AI_EMBEDDINGS_OPERATION_ATTRIBUTE,
- origin: 'auto.ai.langchain',
- status: 'ok',
- }),
- // Error span
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: GEN_AI_EMBEDDINGS_OPERATION_ATTRIBUTE,
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
- }),
- description: 'embeddings error-model',
- op: GEN_AI_EMBEDDINGS_OPERATION_ATTRIBUTE,
- origin: 'auto.ai.langchain',
- status: 'internal_error',
- }),
- ]),
- };
-
- const EXPECTED_TRANSACTION_EMBEDDINGS_PII = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // embedQuery span with input recorded
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: GEN_AI_EMBEDDINGS_OPERATION_ATTRIBUTE,
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'text-embedding-3-small',
- [GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]: 'Hello world',
- }),
- description: 'embeddings text-embedding-3-small',
- op: GEN_AI_EMBEDDINGS_OPERATION_ATTRIBUTE,
- origin: 'auto.ai.langchain',
- status: 'ok',
- }),
- // embedDocuments span with input recorded
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
- [GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]: JSON.stringify(['First document', 'Second document']),
- }),
- description: 'embeddings text-embedding-3-small',
- op: GEN_AI_EMBEDDINGS_OPERATION_ATTRIBUTE,
- origin: 'auto.ai.langchain',
- status: 'ok',
- }),
- ]),
- };
-
createEsmAndCjsTests(__dirname, 'scenario-embeddings.mjs', 'instrument.mjs', (createRunner, test) => {
test('creates embedding spans with sendDefaultPii: false', async () => {
- await createRunner().ignore('event').expect({ transaction: EXPECTED_TRANSACTION_EMBEDDINGS }).start().completed();
+ await createRunner()
+ .ignore('event')
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(3);
+ expect(container.items.map(span => span.name).sort()).toEqual([
+ 'embeddings error-model',
+ 'embeddings text-embedding-3-small',
+ 'embeddings text-embedding-3-small',
+ ]);
+
+ const successfulSpans = container.items.filter(
+ span => span.name === 'embeddings text-embedding-3-small' && span.status === 'ok',
+ );
+ expect(successfulSpans).toHaveLength(2);
+ for (const span of successfulSpans) {
+ expect(span.attributes['sentry.op'].value).toBe(GEN_AI_EMBEDDINGS_OPERATION_ATTRIBUTE);
+ expect(span.attributes['sentry.origin'].value).toBe('auto.ai.langchain');
+ expect(span.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('embeddings');
+ expect(span.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('openai');
+ expect(span.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('text-embedding-3-small');
+ expect(span.attributes[GEN_AI_REQUEST_DIMENSIONS_ATTRIBUTE].value).toBe(1536);
+ }
+
+ const errorSpan = container.items.find(span => span.name === 'embeddings error-model');
+ expect(errorSpan).toBeDefined();
+ expect(errorSpan!.status).toBe('error');
+ expect(errorSpan!.attributes['sentry.op'].value).toBe(GEN_AI_EMBEDDINGS_OPERATION_ATTRIBUTE);
+ expect(errorSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('openai');
+ },
+ })
+ .start()
+ .completed();
});
test('does not create duplicate embedding spans from double module patching', async () => {
await createRunner()
.ignore('event')
+ .expect({ transaction: { transaction: 'main' } })
.expect({
- transaction: event => {
- const spans = event.spans || [];
- const embeddingSpans = spans.filter(span => span.op === GEN_AI_EMBEDDINGS_OPERATION_ATTRIBUTE);
+ span: container => {
// The scenario makes 3 embedding calls (2 successful + 1 error).
- expect(embeddingSpans).toHaveLength(3);
+ expect(container.items).toHaveLength(3);
+ for (const span of container.items) {
+ expect(span.attributes['sentry.op'].value).toBe(GEN_AI_EMBEDDINGS_OPERATION_ATTRIBUTE);
+ }
},
})
.start()
@@ -544,7 +417,38 @@ describe('LangChain integration', () => {
test('creates embedding spans with sendDefaultPii: true', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_EMBEDDINGS_PII })
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(3);
+ expect(container.items.map(span => span.name).sort()).toEqual([
+ 'embeddings error-model',
+ 'embeddings text-embedding-3-small',
+ 'embeddings text-embedding-3-small',
+ ]);
+
+ const querySpan = container.items.find(
+ span => span.attributes[GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]?.value === 'Hello world',
+ );
+ expect(querySpan).toBeDefined();
+ expect(querySpan!.name).toBe('embeddings text-embedding-3-small');
+ expect(querySpan!.status).toBe('ok');
+ expect(querySpan!.attributes[GEN_AI_REQUEST_DIMENSIONS_ATTRIBUTE].value).toBe(1536);
+
+ const documentsSpan = container.items.find(
+ span =>
+ span.attributes[GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]?.value ===
+ JSON.stringify(['First document', 'Second document']),
+ );
+ expect(documentsSpan).toBeDefined();
+ expect(documentsSpan!.name).toBe('embeddings text-embedding-3-small');
+ expect(documentsSpan!.status).toBe('ok');
+
+ const errorSpan = container.items.find(span => span.name === 'embeddings error-model');
+ expect(errorSpan).toBeDefined();
+ expect(errorSpan!.status).toBe('error');
+ },
+ })
.start()
.completed();
});
@@ -552,22 +456,6 @@ describe('LangChain integration', () => {
const longContent = 'A'.repeat(50_000);
- const EXPECTED_TRANSACTION_NO_TRUNCATION = {
- transaction: 'main',
- spans: expect.arrayContaining([
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: JSON.stringify([
- { role: 'user', content: longContent },
- { role: 'assistant', content: 'Some reply' },
- { role: 'user', content: 'Follow-up question' },
- ]),
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 3,
- }),
- }),
- ]),
- };
-
createEsmAndCjsTests(
__dirname,
'scenario-no-truncation.mjs',
@@ -576,7 +464,24 @@ describe('LangChain integration', () => {
test('does not truncate input messages when enableTruncation is false', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_NO_TRUNCATION })
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(1);
+ const [firstSpan] = container.items;
+
+ // [0] chat with full (untruncated) input messages
+ expect(firstSpan!.name).toBe('chat claude-3-5-sonnet-20241022');
+ expect(firstSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE].value).toBe(
+ JSON.stringify([
+ { role: 'user', content: longContent },
+ { role: 'assistant', content: 'Some reply' },
+ { role: 'user', content: 'Follow-up question' },
+ ]),
+ );
+ expect(firstSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE].value).toBe(3);
+ },
+ })
.start()
.completed();
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/langchain/v1/instrument-with-pii.mjs b/dev-packages/node-integration-tests/suites/tracing/langchain/v1/instrument-with-pii.mjs
index 3d911666a7d7..7e41037dc2d6 100644
--- a/dev-packages/node-integration-tests/suites/tracing/langchain/v1/instrument-with-pii.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/langchain/v1/instrument-with-pii.mjs
@@ -14,4 +14,5 @@ Sentry.init({
}
return event;
},
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/langchain/v1/instrument.mjs b/dev-packages/node-integration-tests/suites/tracing/langchain/v1/instrument.mjs
index 05985d888de9..6c76a56deccb 100644
--- a/dev-packages/node-integration-tests/suites/tracing/langchain/v1/instrument.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/langchain/v1/instrument.mjs
@@ -14,4 +14,5 @@ Sentry.init({
}
return event;
},
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts b/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts
index 032e33c75dfd..af2a4c46775d 100644
--- a/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/langchain/v1/test.ts
@@ -1,4 +1,3 @@
-import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
import { afterAll, expect } from 'vitest';
import {
GEN_AI_INPUT_MESSAGES_ATTRIBUTE,
@@ -29,141 +28,6 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => {
cleanupChildProcesses();
});
- const EXPECTED_TRANSACTION_DEFAULT_PII_FALSE = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // First span - chat model with claude-3-5-sonnet
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022',
- [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
- [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]: expect.any(String),
- }),
- description: 'chat claude-3-5-sonnet-20241022',
- op: 'gen_ai.chat',
- origin: 'auto.ai.langchain',
- status: 'ok',
- }),
- // Second span - chat model with claude-3-opus
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-opus-20240229',
- [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.9,
- [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.95,
- [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 200,
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]: expect.any(String),
- }),
- description: 'chat claude-3-opus-20240229',
- op: 'gen_ai.chat',
- origin: 'auto.ai.langchain',
- status: 'ok',
- }),
- // Third span - error handling
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
- }),
- description: 'chat error-model',
- op: 'gen_ai.chat',
- origin: 'auto.ai.langchain',
- status: 'internal_error',
- }),
- ]),
- };
-
- const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // First span - chat model with PII
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022',
- [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
- [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true
- [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response when recordOutputs: true
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]: expect.any(String),
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
- }),
- description: 'chat claude-3-5-sonnet-20241022',
- op: 'gen_ai.chat',
- origin: 'auto.ai.langchain',
- status: 'ok',
- }),
- // Second span - chat model with PII
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-opus-20240229',
- [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.9,
- [GEN_AI_REQUEST_TOP_P_ATTRIBUTE]: 0.95,
- [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 200,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true
- [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response when recordOutputs: true
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]: expect.any(String),
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
- }),
- description: 'chat claude-3-opus-20240229',
- op: 'gen_ai.chat',
- origin: 'auto.ai.langchain',
- status: 'ok',
- }),
- // Third span - error handling with PII
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true
- }),
- description: 'chat error-model',
- op: 'gen_ai.chat',
- origin: 'auto.ai.langchain',
- status: 'internal_error',
- }),
- ]),
- };
-
createEsmAndCjsTests(
__dirname,
'scenario.mjs',
@@ -172,7 +36,56 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => {
test('creates langchain related spans with sendDefaultPii: false', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_FALSE })
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(3);
+ expect(container.items.map(span => span.name).sort()).toEqual([
+ 'chat claude-3-5-sonnet-20241022',
+ 'chat claude-3-opus-20240229',
+ 'chat error-model',
+ ]);
+
+ const sonnetSpan = container.items.find(span => span.name === 'chat claude-3-5-sonnet-20241022');
+ expect(sonnetSpan).toBeDefined();
+ expect(sonnetSpan!.status).toBe('ok');
+ expect(sonnetSpan!.attributes['sentry.op'].value).toBe('gen_ai.chat');
+ expect(sonnetSpan!.attributes['sentry.origin'].value).toBe('auto.ai.langchain');
+ expect(sonnetSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('chat');
+ expect(sonnetSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('anthropic');
+ expect(sonnetSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('claude-3-5-sonnet-20241022');
+ expect(sonnetSpan!.attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE].value).toBe(0.7);
+ expect(sonnetSpan!.attributes[GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE].value).toBe(100);
+ expect(sonnetSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(10);
+ expect(sonnetSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(15);
+ expect(sonnetSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE].value).toBe(25);
+ expect(sonnetSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]).toBeDefined();
+ expect(sonnetSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toBeDefined();
+ expect(sonnetSpan!.attributes[GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]).toBeDefined();
+
+ const opusSpan = container.items.find(span => span.name === 'chat claude-3-opus-20240229');
+ expect(opusSpan).toBeDefined();
+ expect(opusSpan!.status).toBe('ok');
+ expect(opusSpan!.attributes['sentry.op'].value).toBe('gen_ai.chat');
+ expect(opusSpan!.attributes['sentry.origin'].value).toBe('auto.ai.langchain');
+ expect(opusSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('anthropic');
+ expect(opusSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('claude-3-opus-20240229');
+ expect(opusSpan!.attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE].value).toBe(0.9);
+ expect(opusSpan!.attributes[GEN_AI_REQUEST_TOP_P_ATTRIBUTE].value).toBe(0.95);
+ expect(opusSpan!.attributes[GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE].value).toBe(200);
+ expect(opusSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(10);
+ expect(opusSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(15);
+ expect(opusSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE].value).toBe(25);
+
+ const errorSpan = container.items.find(span => span.name === 'chat error-model');
+ expect(errorSpan).toBeDefined();
+ expect(errorSpan!.status).toBe('error');
+ expect(errorSpan!.attributes['sentry.op'].value).toBe('gen_ai.chat');
+ expect(errorSpan!.attributes['sentry.origin'].value).toBe('auto.ai.langchain');
+ expect(errorSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('anthropic');
+ expect(errorSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('error-model');
+ },
+ })
.start()
.completed();
});
@@ -194,7 +107,56 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => {
test('creates langchain related spans with sendDefaultPii: true', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_TRUE })
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(3);
+ expect(container.items.map(span => span.name).sort()).toEqual([
+ 'chat claude-3-5-sonnet-20241022',
+ 'chat claude-3-opus-20240229',
+ 'chat error-model',
+ ]);
+
+ const sonnetSpan = container.items.find(span => span.name === 'chat claude-3-5-sonnet-20241022');
+ expect(sonnetSpan).toBeDefined();
+ expect(sonnetSpan!.status).toBe('ok');
+ expect(sonnetSpan!.attributes['sentry.op'].value).toBe('gen_ai.chat');
+ expect(sonnetSpan!.attributes['sentry.origin'].value).toBe('auto.ai.langchain');
+ expect(sonnetSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('anthropic');
+ expect(sonnetSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('claude-3-5-sonnet-20241022');
+ expect(sonnetSpan!.attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE].value).toBe(0.7);
+ expect(sonnetSpan!.attributes[GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE].value).toBe(100);
+ expect(sonnetSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toBeDefined();
+ expect(sonnetSpan!.attributes[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]).toBeDefined();
+ expect(sonnetSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]).toBeDefined();
+ expect(sonnetSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toBeDefined();
+ expect(sonnetSpan!.attributes[GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]).toBeDefined();
+ expect(sonnetSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(10);
+ expect(sonnetSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(15);
+ expect(sonnetSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE].value).toBe(25);
+
+ const opusSpan = container.items.find(span => span.name === 'chat claude-3-opus-20240229');
+ expect(opusSpan).toBeDefined();
+ expect(opusSpan!.status).toBe('ok');
+ expect(opusSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('anthropic');
+ expect(opusSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('claude-3-opus-20240229');
+ expect(opusSpan!.attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE].value).toBe(0.9);
+ expect(opusSpan!.attributes[GEN_AI_REQUEST_TOP_P_ATTRIBUTE].value).toBe(0.95);
+ expect(opusSpan!.attributes[GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE].value).toBe(200);
+ expect(opusSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toBeDefined();
+ expect(opusSpan!.attributes[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]).toBeDefined();
+ expect(opusSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(10);
+ expect(opusSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(15);
+ expect(opusSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE].value).toBe(25);
+
+ const errorSpan = container.items.find(span => span.name === 'chat error-model');
+ expect(errorSpan).toBeDefined();
+ expect(errorSpan!.status).toBe('error');
+ expect(errorSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('anthropic');
+ expect(errorSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('error-model');
+ expect(errorSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toBeDefined();
+ },
+ })
.start()
.completed();
});
@@ -208,34 +170,6 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => {
},
);
- const EXPECTED_TRANSACTION_TOOL_CALLS = {
- transaction: 'main',
- spans: expect.arrayContaining([
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022',
- [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
- [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 150,
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 20,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 30,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 50,
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]: 'tool_use',
- [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String),
- }),
- description: 'chat claude-3-5-sonnet-20241022',
- op: 'gen_ai.chat',
- origin: 'auto.ai.langchain',
- status: 'ok',
- }),
- ]),
- };
-
createEsmAndCjsTests(
__dirname,
'scenario-tools.mjs',
@@ -244,7 +178,28 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => {
test('creates langchain spans with tool calls', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_TOOL_CALLS })
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(1);
+ const [firstSpan] = container.items;
+
+ // [0] chat with tool_use stop reason
+ expect(firstSpan!.name).toBe('chat claude-3-5-sonnet-20241022');
+ expect(firstSpan!.status).toBe('ok');
+ expect(firstSpan!.attributes['sentry.op'].value).toBe('gen_ai.chat');
+ expect(firstSpan!.attributes['sentry.origin'].value).toBe('auto.ai.langchain');
+ expect(firstSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('anthropic');
+ expect(firstSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('claude-3-5-sonnet-20241022');
+ expect(firstSpan!.attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE].value).toBe(0.7);
+ expect(firstSpan!.attributes[GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE].value).toBe(150);
+ expect(firstSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(20);
+ expect(firstSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(30);
+ expect(firstSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE].value).toBe(50);
+ expect(firstSpan!.attributes[GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE].value).toBe('tool_use');
+ expect(firstSpan!.attributes[GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]).toBeDefined();
+ },
+ })
.start()
.completed();
});
@@ -258,68 +213,6 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => {
},
);
- const EXPECTED_TRANSACTION_MESSAGE_TRUNCATION = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // First call: String input truncated (only C's remain, D's are cropped)
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022',
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- // Messages should be present and should include truncated string input (contains only Cs)
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/),
- }),
- description: 'chat claude-3-5-sonnet-20241022',
- op: 'gen_ai.chat',
- origin: 'auto.ai.langchain',
- status: 'ok',
- }),
- // Second call: Array input, last message truncated (only C's remain, D's are cropped)
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022',
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 2,
- [GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE]: expect.stringMatching(/^\[\{"type":"text","content":"A+"\}\]$/),
- // Messages should be present (truncation happened) and should be a JSON array of a single index (contains only Cs)
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/),
- }),
- description: 'chat claude-3-5-sonnet-20241022',
- op: 'gen_ai.chat',
- origin: 'auto.ai.langchain',
- status: 'ok',
- }),
- // Third call: Last message is small and kept without truncation
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'anthropic',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'claude-3-5-sonnet-20241022',
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 2,
- [GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE]: expect.stringMatching(/^\[\{"type":"text","content":"A+"\}\]$/),
-
- // Small message should be kept intact
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: JSON.stringify([
- { role: 'user', content: 'This is a small message that fits within the limit' },
- ]),
- }),
- description: 'chat claude-3-5-sonnet-20241022',
- op: 'gen_ai.chat',
- origin: 'auto.ai.langchain',
- status: 'ok',
- }),
- ]),
- };
-
createEsmAndCjsTests(
__dirname,
'scenario-message-truncation.mjs',
@@ -328,7 +221,45 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => {
test('truncates messages when they exceed byte limit', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_MESSAGE_TRUNCATION })
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(3);
+ const stringInputSpan = container.items.find(
+ span => span.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]?.value === 1,
+ );
+ expect(stringInputSpan).toBeDefined();
+ expect(stringInputSpan!.name).toBe('chat claude-3-5-sonnet-20241022');
+ expect(stringInputSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE].value).toMatch(
+ /^\[\{"role":"user","content":"C+"\}\]$/,
+ );
+
+ const arrayInputSpan = container.items.find(
+ span =>
+ span.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]?.value === 2 &&
+ span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]?.value?.match(
+ /^\[\{"role":"user","content":"C+"\}\]$/,
+ ),
+ );
+ expect(arrayInputSpan).toBeDefined();
+ expect(arrayInputSpan!.name).toBe('chat claude-3-5-sonnet-20241022');
+ expect(arrayInputSpan!.attributes[GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE].value).toMatch(
+ /^\[\{"type":"text","content":"A+"\}\]$/,
+ );
+
+ const smallMessageSpan = container.items.find(
+ span =>
+ span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]?.value ===
+ JSON.stringify([{ role: 'user', content: 'This is a small message that fits within the limit' }]),
+ );
+ expect(smallMessageSpan).toBeDefined();
+ expect(smallMessageSpan!.name).toBe('chat claude-3-5-sonnet-20241022');
+ expect(smallMessageSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE].value).toBe(2);
+ expect(smallMessageSpan!.attributes[GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE].value).toMatch(
+ /^\[\{"type":"text","content":"A+"\}\]$/,
+ );
+ },
+ })
.start()
.completed();
});
@@ -350,46 +281,24 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => {
test('demonstrates timing issue with duplicate spans (ESM only)', async () => {
await createRunner()
.ignore('event')
+ .expect({ transaction: { transaction: 'main' } })
.expect({
- transaction: event => {
- // This test highlights the limitation: if a user creates an Anthropic client
- // before importing LangChain, that client will still be instrumented and
- // could cause duplicate spans when used alongside LangChain.
-
- const spans = event.spans || [];
-
- // First call: Direct Anthropic call made BEFORE LangChain import
- // This should have Anthropic instrumentation (origin: 'auto.ai.anthropic')
- const firstAnthropicSpan = spans.find(
- span => span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic',
+ span: container => {
+ expect(container.items).toHaveLength(2);
+ const anthropicSpan = container.items.find(
+ span => span.attributes['sentry.origin'].value === 'auto.ai.anthropic',
);
+ expect(anthropicSpan).toBeDefined();
+ expect(anthropicSpan!.name).toBe('chat claude-3-5-sonnet-20241022');
- // Second call: LangChain call
- // This should have LangChain instrumentation (origin: 'auto.ai.langchain')
- const langchainSpan = spans.find(
- span => span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.langchain',
+ const langchainSpan = container.items.find(
+ span => span.attributes['sentry.origin'].value === 'auto.ai.langchain',
);
-
- // Third call: Direct Anthropic call made AFTER LangChain import
- // This should NOT have Anthropic instrumentation (skip works correctly)
- // Count how many Anthropic spans we have - should be exactly 1
- const anthropicSpans = spans.filter(
- span => span.description === 'chat claude-3-5-sonnet-20241022' && span.origin === 'auto.ai.anthropic',
- );
-
- // Verify the edge case limitation:
- // - First Anthropic client (created before LangChain) IS instrumented
- expect(firstAnthropicSpan).toBeDefined();
- expect(firstAnthropicSpan?.origin).toBe('auto.ai.anthropic');
-
- // - LangChain call IS instrumented by LangChain
expect(langchainSpan).toBeDefined();
- expect(langchainSpan?.origin).toBe('auto.ai.langchain');
+ expect(langchainSpan!.name).toBe('chat claude-3-5-sonnet-20241022');
- // - Second Anthropic client (created after LangChain) is NOT instrumented
- // This demonstrates that the skip mechanism works for NEW clients
- // We should only have ONE Anthropic span (the first one), not two
- expect(anthropicSpans).toHaveLength(1);
+ // Third call (not present): Direct Anthropic call made AFTER LangChain import
+ // is NOT instrumented, demonstrating the skip mechanism works for NEW clients.
},
})
.start()
@@ -406,69 +315,6 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => {
},
);
- const EXPECTED_TRANSACTION_INIT_CHAT_MODEL = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // First span - initChatModel with gpt-4o
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4o',
- [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
- [GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE]: 100,
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4o',
- [GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]: 'stop',
- }),
- description: 'chat gpt-4o',
- op: 'gen_ai.chat',
- origin: 'auto.ai.langchain',
- status: 'ok',
- }),
- // Second span - initChatModel with gpt-3.5-turbo
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
- [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.5,
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
- [GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE]: 'stop',
- }),
- description: 'chat gpt-3.5-turbo',
- op: 'gen_ai.chat',
- origin: 'auto.ai.langchain',
- status: 'ok',
- }),
- // Third span - error handling
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langchain',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
- }),
- description: 'chat error-model',
- op: 'gen_ai.chat',
- origin: 'auto.ai.langchain',
- status: 'internal_error',
- }),
- ]),
- };
-
createEsmAndCjsTests(
__dirname,
'scenario-init-chat-model.mjs',
@@ -477,7 +323,56 @@ conditionalTest({ min: 20 })('LangChain integration (v1)', () => {
test('creates langchain spans using initChatModel with OpenAI', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_INIT_CHAT_MODEL })
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(3);
+ expect(container.items.map(span => span.name).sort()).toEqual([
+ 'chat error-model',
+ 'chat gpt-3.5-turbo',
+ 'chat gpt-4o',
+ ]);
+
+ const gpt4oSpan = container.items.find(span => span.name === 'chat gpt-4o');
+ expect(gpt4oSpan).toBeDefined();
+ expect(gpt4oSpan!.status).toBe('ok');
+ expect(gpt4oSpan!.attributes['sentry.op'].value).toBe('gen_ai.chat');
+ expect(gpt4oSpan!.attributes['sentry.origin'].value).toBe('auto.ai.langchain');
+ expect(gpt4oSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('chat');
+ expect(gpt4oSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('openai');
+ expect(gpt4oSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('gpt-4o');
+ expect(gpt4oSpan!.attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE].value).toBe(0.7);
+ expect(gpt4oSpan!.attributes[GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE].value).toBe(100);
+ expect(gpt4oSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(8);
+ expect(gpt4oSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(12);
+ expect(gpt4oSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE].value).toBe(20);
+ expect(gpt4oSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]).toBeDefined();
+ expect(gpt4oSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE].value).toBe('gpt-4o');
+ expect(gpt4oSpan!.attributes[GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE].value).toBe('stop');
+
+ const gpt35Span = container.items.find(span => span.name === 'chat gpt-3.5-turbo');
+ expect(gpt35Span).toBeDefined();
+ expect(gpt35Span!.status).toBe('ok');
+ expect(gpt35Span!.attributes['sentry.op'].value).toBe('gen_ai.chat');
+ expect(gpt35Span!.attributes['sentry.origin'].value).toBe('auto.ai.langchain');
+ expect(gpt35Span!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('openai');
+ expect(gpt35Span!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('gpt-3.5-turbo');
+ expect(gpt35Span!.attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE].value).toBe(0.5);
+ expect(gpt35Span!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(8);
+ expect(gpt35Span!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(12);
+ expect(gpt35Span!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE].value).toBe(20);
+ expect(gpt35Span!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE].value).toBe('gpt-3.5-turbo');
+ expect(gpt35Span!.attributes[GEN_AI_RESPONSE_STOP_REASON_ATTRIBUTE].value).toBe('stop');
+
+ const errorSpan = container.items.find(span => span.name === 'chat error-model');
+ expect(errorSpan).toBeDefined();
+ expect(errorSpan!.status).toBe('error');
+ expect(errorSpan!.attributes['sentry.op'].value).toBe('gen_ai.chat');
+ expect(errorSpan!.attributes['sentry.origin'].value).toBe('auto.ai.langchain');
+ expect(errorSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('openai');
+ expect(errorSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('error-model');
+ },
+ })
.start()
.completed();
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/langgraph/instrument-no-truncation.mjs b/dev-packages/node-integration-tests/suites/tracing/langgraph/instrument-no-truncation.mjs
index 91b4e4b1bae5..e91af61469c0 100644
--- a/dev-packages/node-integration-tests/suites/tracing/langgraph/instrument-no-truncation.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/langgraph/instrument-no-truncation.mjs
@@ -14,4 +14,5 @@ Sentry.init({
enableTruncation: false,
}),
],
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/langgraph/instrument-streaming-with-truncation.mjs b/dev-packages/node-integration-tests/suites/tracing/langgraph/instrument-streaming-with-truncation.mjs
index 2d8d986a2cd1..5bdbaf894ca4 100644
--- a/dev-packages/node-integration-tests/suites/tracing/langgraph/instrument-streaming-with-truncation.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/langgraph/instrument-streaming-with-truncation.mjs
@@ -13,4 +13,5 @@ Sentry.init({
enableTruncation: true,
}),
],
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/langgraph/instrument-streaming.mjs b/dev-packages/node-integration-tests/suites/tracing/langgraph/instrument-streaming.mjs
index 48a860c510c5..7db8f66f7c58 100644
--- a/dev-packages/node-integration-tests/suites/tracing/langgraph/instrument-streaming.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/langgraph/instrument-streaming.mjs
@@ -8,4 +8,5 @@ Sentry.init({
sendDefaultPii: true,
transport: loggingTransport,
traceLifecycle: 'stream',
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/langgraph/instrument-with-pii.mjs b/dev-packages/node-integration-tests/suites/tracing/langgraph/instrument-with-pii.mjs
index be512ed2f773..9adaa1c11f4a 100644
--- a/dev-packages/node-integration-tests/suites/tracing/langgraph/instrument-with-pii.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/langgraph/instrument-with-pii.mjs
@@ -7,4 +7,5 @@ Sentry.init({
tracesSampleRate: 1.0,
sendDefaultPii: true,
transport: loggingTransport,
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/langgraph/instrument.mjs b/dev-packages/node-integration-tests/suites/tracing/langgraph/instrument.mjs
index 06cc1a32e93e..69ab61714874 100644
--- a/dev-packages/node-integration-tests/suites/tracing/langgraph/instrument.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/langgraph/instrument.mjs
@@ -7,4 +7,5 @@ Sentry.init({
tracesSampleRate: 1.0,
sendDefaultPii: false,
transport: loggingTransport,
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/langgraph/test.ts b/dev-packages/node-integration-tests/suites/tracing/langgraph/test.ts
index d17e789d73f9..2907c4b9e95e 100644
--- a/dev-packages/node-integration-tests/suites/tracing/langgraph/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/langgraph/test.ts
@@ -8,7 +8,6 @@ import {
GEN_AI_OPERATION_NAME_ATTRIBUTE,
GEN_AI_PIPELINE_NAME_ATTRIBUTE,
GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE,
- GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE,
GEN_AI_RESPONSE_MODEL_ATTRIBUTE,
GEN_AI_RESPONSE_TEXT_ATTRIBUTE,
GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE,
@@ -25,185 +24,40 @@ describe('LangGraph integration', () => {
cleanupChildProcesses();
});
- const EXPECTED_TRANSACTION_DEFAULT_PII_FALSE = {
- transaction: 'langgraph-test',
- spans: expect.arrayContaining([
- // create_agent span
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'create_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.create_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph',
- [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant',
- },
- description: 'create_agent weather_assistant',
- op: 'gen_ai.create_agent',
- origin: 'auto.ai.langgraph',
- status: 'ok',
- }),
- // First invoke_agent span
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph',
- [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant',
- [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'weather_assistant',
- }),
- description: 'invoke_agent weather_assistant',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.ai.langgraph',
- status: 'ok',
- }),
- // Second invoke_agent span
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph',
- [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant',
- [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'weather_assistant',
- }),
- description: 'invoke_agent weather_assistant',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.ai.langgraph',
- status: 'ok',
- }),
- ]),
- };
-
- const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE = {
- transaction: 'langgraph-test',
- spans: expect.arrayContaining([
- // create_agent span (PII enabled doesn't affect this span)
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'create_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.create_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph',
- [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant',
- },
- description: 'create_agent weather_assistant',
- op: 'gen_ai.create_agent',
- origin: 'auto.ai.langgraph',
- status: 'ok',
- }),
- // First invoke_agent span with PII
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph',
- [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant',
- [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'weather_assistant',
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringContaining('What is the weather today?'),
- }),
- description: 'invoke_agent weather_assistant',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.ai.langgraph',
- status: 'ok',
- }),
- // Second invoke_agent span with PII and multiple messages
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph',
- [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'weather_assistant',
- [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'weather_assistant',
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringContaining('Tell me about the weather'),
- }),
- description: 'invoke_agent weather_assistant',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.ai.langgraph',
- status: 'ok',
- }),
- ]),
- };
-
- const EXPECTED_TRANSACTION_WITH_TOOLS = {
- transaction: 'langgraph-tools-test',
- spans: expect.arrayContaining([
- // create_agent span for first graph (no tool calls)
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'create_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.create_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph',
- [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'tool_agent',
- },
- description: 'create_agent tool_agent',
- op: 'gen_ai.create_agent',
- origin: 'auto.ai.langgraph',
- status: 'ok',
- }),
- // invoke_agent span with tools available but not called
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph',
- [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'tool_agent',
- [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'tool_agent',
- [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: expect.stringContaining('get_weather'),
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringContaining('What is the weather?'),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4-0613',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'],
- [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.stringContaining('Response without calling tools'),
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 25,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
- }),
- description: 'invoke_agent tool_agent',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.ai.langgraph',
- status: 'ok',
- }),
- // create_agent span for second graph (with tool calls)
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'create_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.create_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph',
- [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'tool_calling_agent',
- },
- description: 'create_agent tool_calling_agent',
- op: 'gen_ai.create_agent',
- origin: 'auto.ai.langgraph',
- status: 'ok',
- }),
- // invoke_agent span with tool calls and execution
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph',
- [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'tool_calling_agent',
- [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'tool_calling_agent',
- [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: expect.stringContaining('get_weather'),
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringContaining('San Francisco'),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4-0613',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'],
- [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.stringMatching(/"role":"tool"/),
- // Verify tool_calls are captured
- [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.stringContaining('get_weather'),
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 80,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 40,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 120,
- }),
- description: 'invoke_agent tool_calling_agent',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.ai.langgraph',
- status: 'ok',
- }),
- ]),
- };
-
createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument.mjs', (createRunner, test) => {
test('should instrument LangGraph with default PII settings', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_FALSE })
+ .expect({ transaction: { transaction: 'langgraph-test' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(3);
+ expect(container.items.map(span => span.name).sort()).toEqual([
+ 'create_agent weather_assistant',
+ 'invoke_agent weather_assistant',
+ 'invoke_agent weather_assistant',
+ ]);
+
+ const createAgentSpan = container.items.find(span => span.name === 'create_agent weather_assistant');
+ expect(createAgentSpan).toBeDefined();
+ expect(createAgentSpan!.status).toBe('ok');
+ expect(createAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.create_agent');
+ expect(createAgentSpan!.attributes['sentry.origin'].value).toBe('auto.ai.langgraph');
+ expect(createAgentSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('create_agent');
+ expect(createAgentSpan!.attributes[GEN_AI_AGENT_NAME_ATTRIBUTE].value).toBe('weather_assistant');
+
+ const invokeAgentSpans = container.items.filter(span => span.name === 'invoke_agent weather_assistant');
+ expect(invokeAgentSpans).toHaveLength(2);
+ for (const span of invokeAgentSpans) {
+ expect(span.status).toBe('ok');
+ expect(span.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+ expect(span.attributes['sentry.origin'].value).toBe('auto.ai.langgraph');
+ expect(span.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('invoke_agent');
+ expect(span.attributes[GEN_AI_AGENT_NAME_ATTRIBUTE].value).toBe('weather_assistant');
+ expect(span.attributes[GEN_AI_PIPELINE_NAME_ATTRIBUTE].value).toBe('weather_assistant');
+ }
+ },
+ })
.start()
.completed();
});
@@ -213,7 +67,33 @@ describe('LangGraph integration', () => {
test('should instrument LangGraph with sendDefaultPii: true', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_TRUE })
+ .expect({ transaction: { transaction: 'langgraph-test' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(3);
+ const createAgentSpan = container.items.find(span => span.name === 'create_agent weather_assistant');
+ expect(createAgentSpan).toBeDefined();
+ expect(createAgentSpan!.status).toBe('ok');
+ expect(createAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.create_agent');
+
+ const weatherTodaySpan = container.items.find(span =>
+ span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]?.value?.includes('What is the weather today?'),
+ );
+ expect(weatherTodaySpan).toBeDefined();
+ expect(weatherTodaySpan!.name).toBe('invoke_agent weather_assistant');
+ expect(weatherTodaySpan!.status).toBe('ok');
+ expect(weatherTodaySpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+ expect(weatherTodaySpan!.attributes['sentry.origin'].value).toBe('auto.ai.langgraph');
+
+ const weatherDetailsSpan = container.items.find(span =>
+ span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]?.value?.includes('Tell me about the weather'),
+ );
+ expect(weatherDetailsSpan).toBeDefined();
+ expect(weatherDetailsSpan!.name).toBe('invoke_agent weather_assistant');
+ expect(weatherDetailsSpan!.status).toBe('ok');
+ expect(weatherDetailsSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+ },
+ })
.start()
.completed();
});
@@ -221,75 +101,109 @@ describe('LangGraph integration', () => {
createEsmAndCjsTests(__dirname, 'scenario-tools.mjs', 'instrument-with-pii.mjs', (createRunner, test) => {
test('should capture tools from LangGraph agent', { timeout: 30000 }, async () => {
- await createRunner().ignore('event').expect({ transaction: EXPECTED_TRANSACTION_WITH_TOOLS }).start().completed();
+ await createRunner()
+ .ignore('event')
+ .expect({ transaction: { transaction: 'langgraph-tools-test' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(4);
+ expect(container.items.map(span => span.name).sort()).toEqual([
+ 'create_agent tool_agent',
+ 'create_agent tool_calling_agent',
+ 'invoke_agent tool_agent',
+ 'invoke_agent tool_calling_agent',
+ ]);
+
+ const toolAgentSpan = container.items.find(span => span.name === 'create_agent tool_agent');
+ expect(toolAgentSpan).toBeDefined();
+ expect(toolAgentSpan!.status).toBe('ok');
+ expect(toolAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.create_agent');
+ expect(toolAgentSpan!.attributes[GEN_AI_AGENT_NAME_ATTRIBUTE].value).toBe('tool_agent');
+
+ const toolAgentInvokeSpan = container.items.find(span => span.name === 'invoke_agent tool_agent');
+ expect(toolAgentInvokeSpan).toBeDefined();
+ expect(toolAgentInvokeSpan!.status).toBe('ok');
+ expect(toolAgentInvokeSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+ expect(toolAgentInvokeSpan!.attributes[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE].value).toContain(
+ 'get_weather',
+ );
+ expect(toolAgentInvokeSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE].value).toContain(
+ 'What is the weather?',
+ );
+ expect(toolAgentInvokeSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE].value).toBe('gpt-4-0613');
+ expect(toolAgentInvokeSpan!.attributes[GEN_AI_RESPONSE_TEXT_ATTRIBUTE].value).toContain(
+ 'Response without calling tools',
+ );
+ expect(toolAgentInvokeSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(25);
+ expect(toolAgentInvokeSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(15);
+ expect(toolAgentInvokeSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE].value).toBe(40);
+
+ const toolCallingAgentSpan = container.items.find(span => span.name === 'create_agent tool_calling_agent');
+ expect(toolCallingAgentSpan).toBeDefined();
+ expect(toolCallingAgentSpan!.status).toBe('ok');
+ expect(toolCallingAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.create_agent');
+ expect(toolCallingAgentSpan!.attributes[GEN_AI_AGENT_NAME_ATTRIBUTE].value).toBe('tool_calling_agent');
+
+ const toolCallingInvokeSpan = container.items.find(span => span.name === 'invoke_agent tool_calling_agent');
+ expect(toolCallingInvokeSpan).toBeDefined();
+ expect(toolCallingInvokeSpan!.status).toBe('ok');
+ expect(toolCallingInvokeSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+ expect(toolCallingInvokeSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE].value).toContain('San Francisco');
+ expect(toolCallingInvokeSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE].value).toBe('gpt-4-0613');
+ expect(toolCallingInvokeSpan!.attributes[GEN_AI_RESPONSE_TEXT_ATTRIBUTE].value).toMatch(/"role":"tool"/);
+ expect(toolCallingInvokeSpan!.attributes[GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE].value).toContain(
+ 'get_weather',
+ );
+ expect(toolCallingInvokeSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(80);
+ expect(toolCallingInvokeSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(40);
+ expect(toolCallingInvokeSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE].value).toBe(120);
+ },
+ })
+ .start()
+ .completed();
});
});
// Test for thread_id (conversation ID) support
- const EXPECTED_TRANSACTION_THREAD_ID = {
- transaction: 'langgraph-thread-id-test',
- spans: expect.arrayContaining([
- // create_agent span
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'create_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.create_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph',
- [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'thread_test_agent',
- },
- description: 'create_agent thread_test_agent',
- op: 'gen_ai.create_agent',
- origin: 'auto.ai.langgraph',
- status: 'ok',
- }),
- // First invoke_agent span with thread_id
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph',
- [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'thread_test_agent',
- [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'thread_test_agent',
- // The thread_id should be captured as conversation.id
- [GEN_AI_CONVERSATION_ID_ATTRIBUTE]: 'thread_abc123_session_1',
- }),
- description: 'invoke_agent thread_test_agent',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.ai.langgraph',
- status: 'ok',
- }),
- // Second invoke_agent span with different thread_id
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph',
- [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'thread_test_agent',
- [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'thread_test_agent',
- // Different thread_id for different conversation
- [GEN_AI_CONVERSATION_ID_ATTRIBUTE]: 'thread_xyz789_session_2',
- }),
- description: 'invoke_agent thread_test_agent',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.ai.langgraph',
- status: 'ok',
- }),
- // Third invoke_agent span without thread_id (should NOT have gen_ai.conversation.id)
- expect.objectContaining({
- data: expect.not.objectContaining({
- [GEN_AI_CONVERSATION_ID_ATTRIBUTE]: expect.anything(),
- }),
- description: 'invoke_agent thread_test_agent',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.ai.langgraph',
- status: 'ok',
- }),
- ]),
- };
-
createEsmAndCjsTests(__dirname, 'scenario-thread-id.mjs', 'instrument.mjs', (createRunner, test) => {
test('should capture thread_id as gen_ai.conversation.id', async () => {
- await createRunner().ignore('event').expect({ transaction: EXPECTED_TRANSACTION_THREAD_ID }).start().completed();
+ await createRunner()
+ .ignore('event')
+ .expect({ transaction: { transaction: 'langgraph-thread-id-test' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(4);
+ const createAgentSpan = container.items.find(span => span.name === 'create_agent thread_test_agent');
+ expect(createAgentSpan).toBeDefined();
+ expect(createAgentSpan!.status).toBe('ok');
+ expect(createAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.create_agent');
+
+ const firstThreadSpan = container.items.find(
+ span => span.attributes[GEN_AI_CONVERSATION_ID_ATTRIBUTE]?.value === 'thread_abc123_session_1',
+ );
+ expect(firstThreadSpan).toBeDefined();
+ expect(firstThreadSpan!.name).toBe('invoke_agent thread_test_agent');
+ expect(firstThreadSpan!.status).toBe('ok');
+ expect(firstThreadSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+
+ const secondThreadSpan = container.items.find(
+ span => span.attributes[GEN_AI_CONVERSATION_ID_ATTRIBUTE]?.value === 'thread_xyz789_session_2',
+ );
+ expect(secondThreadSpan).toBeDefined();
+ expect(secondThreadSpan!.name).toBe('invoke_agent thread_test_agent');
+ expect(secondThreadSpan!.status).toBe('ok');
+
+ const noThreadSpan = container.items.find(
+ span =>
+ span.name === 'invoke_agent thread_test_agent' &&
+ span.attributes[GEN_AI_CONVERSATION_ID_ATTRIBUTE] === undefined,
+ );
+ expect(noThreadSpan).toBeDefined();
+ expect(noThreadSpan!.status).toBe('ok');
+ },
+ })
+ .start()
+ .completed();
});
});
@@ -301,18 +215,16 @@ describe('LangGraph integration', () => {
test('extracts system instructions from messages', async () => {
await createRunner()
.ignore('event')
+ .expect({ transaction: { transaction: 'main' } })
.expect({
- transaction: {
- transaction: 'main',
- spans: expect.arrayContaining([
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE]: JSON.stringify([
- { type: 'text', content: 'You are a helpful assistant' },
- ]),
- }),
- }),
- ]),
+ span: container => {
+ expect(container.items).toHaveLength(2);
+ const invokeAgentSpan = container.items.find(span => span.name === 'invoke_agent test-agent');
+
+ expect(invokeAgentSpan).toBeDefined();
+ expect(invokeAgentSpan!.attributes[GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE].value).toBe(
+ JSON.stringify([{ type: 'text', content: 'You are a helpful assistant' }]),
+ );
},
})
.start()
@@ -322,69 +234,48 @@ describe('LangGraph integration', () => {
);
// Test for null input resume scenario
- const EXPECTED_TRANSACTION_RESUME = {
- transaction: 'langgraph-resume-test',
- contexts: {
- trace: expect.objectContaining({
- status: 'ok',
- }),
- },
- spans: expect.arrayContaining([
- // create_agent span
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'create_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.create_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph',
- [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'resume_agent',
- },
- description: 'create_agent resume_agent',
- op: 'gen_ai.create_agent',
- origin: 'auto.ai.langgraph',
- status: 'ok',
- }),
- // invoke_agent span with null input (resume)
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.langgraph',
- [GEN_AI_AGENT_NAME_ATTRIBUTE]: 'resume_agent',
- [GEN_AI_PIPELINE_NAME_ATTRIBUTE]: 'resume_agent',
- [GEN_AI_CONVERSATION_ID_ATTRIBUTE]: 'resume-thread-1',
- }),
- description: 'invoke_agent resume_agent',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.ai.langgraph',
- status: 'ok',
- }),
- ]),
- };
-
createEsmAndCjsTests(__dirname, 'scenario-resume.mjs', 'instrument.mjs', (createRunner, test) => {
test('should not throw when invoke is called with null input (resume scenario)', async () => {
- await createRunner().ignore('event').expect({ transaction: EXPECTED_TRANSACTION_RESUME }).start().completed();
+ await createRunner()
+ .ignore('event')
+ .expect({
+ transaction: {
+ transaction: 'langgraph-resume-test',
+ contexts: {
+ trace: expect.objectContaining({
+ status: 'ok',
+ }),
+ },
+ },
+ })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(3);
+ const createAgentSpan = container.items.find(span => span.name === 'create_agent resume_agent');
+ expect(createAgentSpan).toBeDefined();
+ expect(createAgentSpan!.status).toBe('ok');
+ expect(createAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.create_agent');
+ expect(createAgentSpan!.attributes[GEN_AI_AGENT_NAME_ATTRIBUTE].value).toBe('resume_agent');
+
+ const invokeAgentSpan = container.items.find(
+ span => span.attributes[GEN_AI_CONVERSATION_ID_ATTRIBUTE]?.value === 'resume-thread-1',
+ );
+ expect(invokeAgentSpan).toBeDefined();
+ expect(invokeAgentSpan!.name).toBe('invoke_agent resume_agent');
+ expect(invokeAgentSpan!.status).toBe('ok');
+ expect(invokeAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+ expect(invokeAgentSpan!.attributes['sentry.origin'].value).toBe('auto.ai.langgraph');
+ expect(invokeAgentSpan!.attributes[GEN_AI_AGENT_NAME_ATTRIBUTE].value).toBe('resume_agent');
+ expect(invokeAgentSpan!.attributes[GEN_AI_PIPELINE_NAME_ATTRIBUTE].value).toBe('resume_agent');
+ },
+ })
+ .start()
+ .completed();
});
});
const longContent = 'A'.repeat(50_000);
- const EXPECTED_TRANSACTION_NO_TRUNCATION = {
- transaction: 'langgraph-test',
- spans: expect.arrayContaining([
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: JSON.stringify([
- { role: 'user', content: longContent },
- { role: 'assistant', content: 'Some reply' },
- { role: 'user', content: 'Follow-up question' },
- ]),
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 3,
- }),
- }),
- ]),
- };
-
createEsmAndCjsTests(
__dirname,
'scenario-no-truncation.mjs',
@@ -393,7 +284,25 @@ describe('LangGraph integration', () => {
test('does not truncate input messages when enableTruncation is false', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_NO_TRUNCATION })
+ .expect({ transaction: { transaction: 'langgraph-test' } })
+ .expect({
+ span: container => {
+ const expectedMessages = JSON.stringify([
+ { role: 'user', content: longContent },
+ { role: 'assistant', content: 'Some reply' },
+ { role: 'user', content: 'Follow-up question' },
+ ]);
+
+ expect(container.items).toHaveLength(2);
+ const invokeAgentSpan = container.items.find(
+ span => span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]?.value === expectedMessages,
+ );
+
+ expect(invokeAgentSpan).toBeDefined();
+ expect(invokeAgentSpan!.name).toBe('invoke_agent weather_assistant');
+ expect(invokeAgentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE].value).toBe(3);
+ },
+ })
.start()
.completed();
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/instrument-no-truncation.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/instrument-no-truncation.mjs
index 0dd039762f1f..a8c83a29bcfd 100644
--- a/dev-packages/node-integration-tests/suites/tracing/openai/instrument-no-truncation.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/instrument-no-truncation.mjs
@@ -20,4 +20,5 @@ Sentry.init({
}
return event;
},
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/instrument-root-span.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/instrument-root-span.mjs
index f3fbac9d1274..4fad2b24a6d6 100644
--- a/dev-packages/node-integration-tests/suites/tracing/openai/instrument-root-span.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/instrument-root-span.mjs
@@ -8,4 +8,5 @@ Sentry.init({
sendDefaultPii: false,
transport: loggingTransport,
integrations: [Sentry.openAIIntegration()],
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/instrument-streaming-with-truncation.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/instrument-streaming-with-truncation.mjs
index 097c7adcf087..6aef96c5dcc9 100644
--- a/dev-packages/node-integration-tests/suites/tracing/openai/instrument-streaming-with-truncation.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/instrument-streaming-with-truncation.mjs
@@ -13,4 +13,5 @@ Sentry.init({
enableTruncation: true,
}),
],
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/instrument-streaming.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/instrument-streaming.mjs
index 48a860c510c5..7db8f66f7c58 100644
--- a/dev-packages/node-integration-tests/suites/tracing/openai/instrument-streaming.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/instrument-streaming.mjs
@@ -8,4 +8,5 @@ Sentry.init({
sendDefaultPii: true,
transport: loggingTransport,
traceLifecycle: 'stream',
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/instrument-with-options.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/instrument-with-options.mjs
index 86219de9983a..1fd24cc3fa8f 100644
--- a/dev-packages/node-integration-tests/suites/tracing/openai/instrument-with-options.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/instrument-with-options.mjs
@@ -19,4 +19,5 @@ Sentry.init({
}
return event;
},
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/instrument-with-pii.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/instrument-with-pii.mjs
index 74bc63db971b..00a67de0ee35 100644
--- a/dev-packages/node-integration-tests/suites/tracing/openai/instrument-with-pii.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/instrument-with-pii.mjs
@@ -14,4 +14,5 @@ Sentry.init({
}
return event;
},
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/instrument.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/instrument.mjs
index 1ff3990a0693..ac81315e85e4 100644
--- a/dev-packages/node-integration-tests/suites/tracing/openai/instrument.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/instrument.mjs
@@ -14,4 +14,5 @@ Sentry.init({
}
return event;
},
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/instrument-with-pii.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/instrument-with-pii.mjs
index a53a13af7738..6826a894f085 100644
--- a/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/instrument-with-pii.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/instrument-with-pii.mjs
@@ -8,4 +8,5 @@ Sentry.init({
sendDefaultPii: true,
transport: loggingTransport,
integrations: [Sentry.openAIIntegration()],
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/instrument.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/instrument.mjs
index f3fbac9d1274..4fad2b24a6d6 100644
--- a/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/instrument.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/instrument.mjs
@@ -8,4 +8,5 @@ Sentry.init({
sendDefaultPii: false,
transport: loggingTransport,
integrations: [Sentry.openAIIntegration()],
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/test.ts b/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/test.ts
index c66f5cb65c6b..f4185bfc66f8 100644
--- a/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/test.ts
@@ -76,212 +76,249 @@ describe('OpenAI Tool Calls integration', () => {
},
]);
- const EXPECTED_TRANSACTION_DEFAULT_PII_FALSE = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // First span - chat completion with tools (non-streaming)
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4',
- [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: WEATHER_TOOL_DEFINITION,
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-tools-123',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["tool_calls"]',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
- },
- description: 'chat gpt-4',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- // Second span - chat completion with tools and streaming
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4',
- [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
- [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: WEATHER_TOOL_DEFINITION,
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-stream-tools-123',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["tool_calls"]',
- [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
- },
- description: 'chat gpt-4',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- // Third span - responses API with tools (non-streaming)
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4',
- [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: WEATHER_TOOL_DEFINITION,
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'resp_tools_789',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["completed"]',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
- },
- description: 'chat gpt-4',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- // Fourth span - responses API with tools and streaming
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4',
- [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
- [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: WEATHER_TOOL_DEFINITION,
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'resp_stream_tools_789',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["in_progress","completed"]',
- [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
- },
- description: 'chat gpt-4',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- ]),
- };
-
- const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // First span - chat completion with tools (non-streaming) with PII
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4',
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather like in Paris today?"}]',
- [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: WEATHER_TOOL_DEFINITION,
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-tools-123',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["tool_calls"]',
- [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: '[""]',
- [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: CHAT_TOOL_CALLS,
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
- },
- description: 'chat gpt-4',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- // Second span - chat completion with tools and streaming with PII
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4',
- [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather like in Paris today?"}]',
- [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: WEATHER_TOOL_DEFINITION,
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-stream-tools-123',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["tool_calls"]',
- [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
- [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: CHAT_STREAM_TOOL_CALLS,
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
- },
- description: 'chat gpt-4',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- // Third span - responses API with tools (non-streaming) with PII
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4',
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather like in Paris today?"}]',
- [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: WEATHER_TOOL_DEFINITION,
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'resp_tools_789',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["completed"]',
- [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: RESPONSES_TOOL_CALLS,
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
- },
- description: 'chat gpt-4',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- // Fourth span - responses API with tools and streaming with PII
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4',
- [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather like in Paris today?"}]',
- [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: WEATHER_TOOL_DEFINITION,
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'resp_stream_tools_789',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["in_progress","completed"]',
- [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
- [GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: RESPONSES_TOOL_CALLS,
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 8,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 12,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
- },
- description: 'chat gpt-4',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- ]),
- };
-
createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument.mjs', (createRunner, test) => {
test('creates openai tool calls related spans with sendDefaultPii: false', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_FALSE })
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(4);
+ const chatToolsSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'chatcmpl-tools-123',
+ );
+ expect(chatToolsSpan).toBeDefined();
+ expect(chatToolsSpan!.name).toBe('chat gpt-4');
+ expect(chatToolsSpan!.status).toBe('ok');
+ expect(chatToolsSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(chatToolsSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(chatToolsSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(chatToolsSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({ type: 'string', value: 'openai' });
+ expect(chatToolsSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-4',
+ });
+ expect(chatToolsSpan!.attributes[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: WEATHER_TOOL_DEFINITION,
+ });
+ expect(chatToolsSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-4',
+ });
+ expect(chatToolsSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chatcmpl-tools-123',
+ });
+ expect(chatToolsSpan!.attributes[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '["tool_calls"]',
+ });
+ expect(chatToolsSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 15,
+ });
+ expect(chatToolsSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 25,
+ });
+ expect(chatToolsSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 40,
+ });
+
+ const streamingChatToolsSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'chatcmpl-stream-tools-123',
+ );
+ expect(streamingChatToolsSpan).toBeDefined();
+ expect(streamingChatToolsSpan!.name).toBe('chat gpt-4');
+ expect(streamingChatToolsSpan!.status).toBe('ok');
+ expect(streamingChatToolsSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(streamingChatToolsSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(streamingChatToolsSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(streamingChatToolsSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(streamingChatToolsSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-4',
+ });
+ expect(streamingChatToolsSpan!.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE]).toEqual({
+ type: 'boolean',
+ value: true,
+ });
+ expect(streamingChatToolsSpan!.attributes[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: WEATHER_TOOL_DEFINITION,
+ });
+ expect(streamingChatToolsSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-4',
+ });
+ expect(streamingChatToolsSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chatcmpl-stream-tools-123',
+ });
+ expect(streamingChatToolsSpan!.attributes[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '["tool_calls"]',
+ });
+ expect(streamingChatToolsSpan!.attributes[GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]).toEqual({
+ type: 'boolean',
+ value: true,
+ });
+ expect(streamingChatToolsSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 15,
+ });
+ expect(streamingChatToolsSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 25,
+ });
+ expect(streamingChatToolsSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 40,
+ });
+
+ const responsesToolsSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'resp_tools_789',
+ );
+ expect(responsesToolsSpan).toBeDefined();
+ expect(responsesToolsSpan!.name).toBe('chat gpt-4');
+ expect(responsesToolsSpan!.status).toBe('ok');
+ expect(responsesToolsSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(responsesToolsSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(responsesToolsSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(responsesToolsSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(responsesToolsSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-4',
+ });
+ expect(responsesToolsSpan!.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE]).toBeUndefined();
+ expect(responsesToolsSpan!.attributes[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: WEATHER_TOOL_DEFINITION,
+ });
+ expect(responsesToolsSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-4',
+ });
+ expect(responsesToolsSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'resp_tools_789',
+ });
+ expect(responsesToolsSpan!.attributes[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '["completed"]',
+ });
+ expect(responsesToolsSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 8,
+ });
+ expect(responsesToolsSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 12,
+ });
+ expect(responsesToolsSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 20,
+ });
+
+ const streamingResponsesToolsSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'resp_stream_tools_789',
+ );
+ expect(streamingResponsesToolsSpan).toBeDefined();
+ expect(streamingResponsesToolsSpan!.name).toBe('chat gpt-4');
+ expect(streamingResponsesToolsSpan!.status).toBe('ok');
+ expect(streamingResponsesToolsSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(streamingResponsesToolsSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(streamingResponsesToolsSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(streamingResponsesToolsSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(streamingResponsesToolsSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-4',
+ });
+ expect(streamingResponsesToolsSpan!.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE]).toEqual({
+ type: 'boolean',
+ value: true,
+ });
+ expect(streamingResponsesToolsSpan!.attributes[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: WEATHER_TOOL_DEFINITION,
+ });
+ expect(streamingResponsesToolsSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-4',
+ });
+ expect(streamingResponsesToolsSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'resp_stream_tools_789',
+ });
+ expect(streamingResponsesToolsSpan!.attributes[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '["in_progress","completed"]',
+ });
+ expect(streamingResponsesToolsSpan!.attributes[GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]).toEqual({
+ type: 'boolean',
+ value: true,
+ });
+ expect(streamingResponsesToolsSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 8,
+ });
+ expect(streamingResponsesToolsSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 12,
+ });
+ expect(streamingResponsesToolsSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 20,
+ });
+ },
+ })
.start()
.completed();
});
@@ -291,7 +328,297 @@ describe('OpenAI Tool Calls integration', () => {
test('creates openai tool calls related spans with sendDefaultPii: true', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_TRUE })
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(4);
+ const chatToolsSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'chatcmpl-tools-123',
+ );
+ expect(chatToolsSpan).toBeDefined();
+ expect(chatToolsSpan!.name).toBe('chat gpt-4');
+ expect(chatToolsSpan!.status).toBe('ok');
+ expect(chatToolsSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(chatToolsSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(chatToolsSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(chatToolsSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({ type: 'string', value: 'openai' });
+ expect(chatToolsSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-4',
+ });
+ expect(chatToolsSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 1,
+ });
+ expect(chatToolsSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '[{"role":"user","content":"What is the weather like in Paris today?"}]',
+ });
+ expect(chatToolsSpan!.attributes[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: WEATHER_TOOL_DEFINITION,
+ });
+ expect(chatToolsSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-4',
+ });
+ expect(chatToolsSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chatcmpl-tools-123',
+ });
+ expect(chatToolsSpan!.attributes[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '["tool_calls"]',
+ });
+ expect(chatToolsSpan!.attributes[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '[""]',
+ });
+ expect(chatToolsSpan!.attributes[GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: CHAT_TOOL_CALLS,
+ });
+ expect(chatToolsSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 15,
+ });
+ expect(chatToolsSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 25,
+ });
+ expect(chatToolsSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 40,
+ });
+
+ const streamingChatToolsSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'chatcmpl-stream-tools-123',
+ );
+ expect(streamingChatToolsSpan).toBeDefined();
+ expect(streamingChatToolsSpan!.name).toBe('chat gpt-4');
+ expect(streamingChatToolsSpan!.status).toBe('ok');
+ expect(streamingChatToolsSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(streamingChatToolsSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(streamingChatToolsSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(streamingChatToolsSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(streamingChatToolsSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-4',
+ });
+ expect(streamingChatToolsSpan!.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE]).toEqual({
+ type: 'boolean',
+ value: true,
+ });
+ expect(streamingChatToolsSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 1,
+ });
+ expect(streamingChatToolsSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '[{"role":"user","content":"What is the weather like in Paris today?"}]',
+ });
+ expect(streamingChatToolsSpan!.attributes[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: WEATHER_TOOL_DEFINITION,
+ });
+ expect(streamingChatToolsSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-4',
+ });
+ expect(streamingChatToolsSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chatcmpl-stream-tools-123',
+ });
+ expect(streamingChatToolsSpan!.attributes[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '["tool_calls"]',
+ });
+ expect(streamingChatToolsSpan!.attributes[GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]).toEqual({
+ type: 'boolean',
+ value: true,
+ });
+ expect(streamingChatToolsSpan!.attributes[GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: CHAT_STREAM_TOOL_CALLS,
+ });
+ expect(streamingChatToolsSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 15,
+ });
+ expect(streamingChatToolsSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 25,
+ });
+ expect(streamingChatToolsSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 40,
+ });
+
+ const responsesToolsSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'resp_tools_789',
+ );
+ expect(responsesToolsSpan).toBeDefined();
+ expect(responsesToolsSpan!.name).toBe('chat gpt-4');
+ expect(responsesToolsSpan!.status).toBe('ok');
+ expect(responsesToolsSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(responsesToolsSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(responsesToolsSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(responsesToolsSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(responsesToolsSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-4',
+ });
+ expect(responsesToolsSpan!.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE]).toBeUndefined();
+ expect(responsesToolsSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 1,
+ });
+ expect(responsesToolsSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '[{"role":"user","content":"What is the weather like in Paris today?"}]',
+ });
+ expect(responsesToolsSpan!.attributes[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: WEATHER_TOOL_DEFINITION,
+ });
+ expect(responsesToolsSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-4',
+ });
+ expect(responsesToolsSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'resp_tools_789',
+ });
+ expect(responsesToolsSpan!.attributes[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '["completed"]',
+ });
+ expect(responsesToolsSpan!.attributes[GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: RESPONSES_TOOL_CALLS,
+ });
+ expect(responsesToolsSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 8,
+ });
+ expect(responsesToolsSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 12,
+ });
+ expect(responsesToolsSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 20,
+ });
+
+ const streamingResponsesToolsSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'resp_stream_tools_789',
+ );
+ expect(streamingResponsesToolsSpan).toBeDefined();
+ expect(streamingResponsesToolsSpan!.name).toBe('chat gpt-4');
+ expect(streamingResponsesToolsSpan!.status).toBe('ok');
+ expect(streamingResponsesToolsSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(streamingResponsesToolsSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(streamingResponsesToolsSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(streamingResponsesToolsSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(streamingResponsesToolsSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-4',
+ });
+ expect(streamingResponsesToolsSpan!.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE]).toEqual({
+ type: 'boolean',
+ value: true,
+ });
+ expect(streamingResponsesToolsSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 1,
+ });
+ expect(streamingResponsesToolsSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '[{"role":"user","content":"What is the weather like in Paris today?"}]',
+ });
+ expect(streamingResponsesToolsSpan!.attributes[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: WEATHER_TOOL_DEFINITION,
+ });
+ expect(streamingResponsesToolsSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-4',
+ });
+ expect(streamingResponsesToolsSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'resp_stream_tools_789',
+ });
+ expect(streamingResponsesToolsSpan!.attributes[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '["in_progress","completed"]',
+ });
+ expect(streamingResponsesToolsSpan!.attributes[GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]).toEqual({
+ type: 'boolean',
+ value: true,
+ });
+ expect(streamingResponsesToolsSpan!.attributes[GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: RESPONSES_TOOL_CALLS,
+ });
+ expect(streamingResponsesToolsSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 8,
+ });
+ expect(streamingResponsesToolsSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 12,
+ });
+ expect(streamingResponsesToolsSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 20,
+ });
+ },
+ })
.start()
.completed();
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/test.ts b/dev-packages/node-integration-tests/suites/tracing/openai/test.ts
index e3ecc4f80ae0..2d5d1daa68ce 100644
--- a/dev-packages/node-integration-tests/suites/tracing/openai/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/test.ts
@@ -29,297 +29,301 @@ describe('OpenAI integration', () => {
cleanupChildProcesses();
});
- const EXPECTED_TRANSACTION_DEFAULT_PII_FALSE_CHAT = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // First span - basic chat completion without PII
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
- [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
- },
- description: 'chat gpt-3.5-turbo',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- // Second span - responses API
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'resp_mock456',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["completed"]',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 5,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 8,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 13,
- },
- description: 'chat gpt-3.5-turbo',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- // Third span - error handling
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
- },
- description: 'chat error-model',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'internal_error',
- }),
- // Fourth span - chat completions streaming
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4',
- [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8,
- [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-stream-123',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 12,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 18,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
- [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
- },
- description: 'chat gpt-4',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- // Fifth span - responses API streaming
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4',
- [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'resp_stream_456',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["in_progress","completed"]',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 6,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 16,
- [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
- },
- description: 'chat gpt-4',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- // Sixth span - error handling in streaming context
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
- [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- },
- description: 'chat error-model',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'internal_error',
- }),
- ]),
- };
-
- const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE_CHAT = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // First span - basic chat completion with PII
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
- [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the capital of France?"}]',
- [GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE]: JSON.stringify([
- { type: 'text', content: 'You are a helpful assistant.' },
- ]),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]',
- [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: '["Hello from OpenAI mock!"]',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
- },
- description: 'chat gpt-3.5-turbo',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- // Second span - responses API with PII
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: 'Translate this to French: Hello',
- [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Response to: Translate this to French: Hello',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["completed"]',
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'resp_mock456',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 5,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 8,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 13,
- },
- description: 'chat gpt-3.5-turbo',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- // Third span - error handling with PII
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"This will fail"}]',
- },
- description: 'chat error-model',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'internal_error',
- }),
- // Fourth span - chat completions streaming with PII
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4',
- [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8,
- [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Tell me about streaming"}]',
- [GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE]: JSON.stringify([
- { type: 'text', content: 'You are a helpful assistant.' },
- ]),
- [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Hello from OpenAI streaming!',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-stream-123',
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 12,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 18,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
- [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
- }),
- description: 'chat gpt-4',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- // Fifth span - responses API streaming with PII
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4',
- [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: 'Test streaming responses API',
- [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]:
- 'Streaming response to: Test streaming responses APITest streaming responses API',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["in_progress","completed"]',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'resp_stream_456',
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 6,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 16,
- [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
- }),
- description: 'chat gpt-4',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- // Sixth span - error handling in streaming context with PII
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
- [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"This will fail"}]',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- },
- description: 'chat error-model',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'internal_error',
- }),
- ]),
- };
-
- const EXPECTED_TRANSACTION_WITH_OPTIONS = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // Check that custom options are respected
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true
- [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text when recordOutputs: true
- }),
- }),
- // Check that custom options are respected for streaming
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true
- [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text when recordOutputs: true
- [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true, // Should be marked as stream
- }),
- }),
- ]),
- };
-
createEsmAndCjsTests(__dirname, 'scenario-chat.mjs', 'instrument.mjs', (createRunner, test) => {
test('creates openai related spans with sendDefaultPii: false', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_FALSE_CHAT })
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(6);
+ const chatCompletionSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'chatcmpl-mock123',
+ );
+ expect(chatCompletionSpan).toBeDefined();
+ expect(chatCompletionSpan!.name).toBe('chat gpt-3.5-turbo');
+ expect(chatCompletionSpan!.status).toBe('ok');
+ expect(chatCompletionSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(chatCompletionSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(chatCompletionSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-3.5-turbo',
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]).toEqual({
+ type: 'double',
+ value: 0.7,
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-3.5-turbo',
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chatcmpl-mock123',
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '["stop"]',
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 10,
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 15,
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 25,
+ });
+
+ const responsesSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'resp_mock456',
+ );
+ expect(responsesSpan).toBeDefined();
+ expect(responsesSpan!.name).toBe('chat gpt-3.5-turbo');
+ expect(responsesSpan!.status).toBe('ok');
+ expect(responsesSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(responsesSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(responsesSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(responsesSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({ type: 'string', value: 'openai' });
+ expect(responsesSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-3.5-turbo',
+ });
+ expect(responsesSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-3.5-turbo',
+ });
+ expect(responsesSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'resp_mock456',
+ });
+ expect(responsesSpan!.attributes[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '["completed"]',
+ });
+ expect(responsesSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 5,
+ });
+ expect(responsesSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 8,
+ });
+ expect(responsesSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 13,
+ });
+
+ const nonStreamingErrorSpan = container.items.find(
+ span =>
+ span.name === 'chat error-model' && span.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE] === undefined,
+ );
+ expect(nonStreamingErrorSpan).toBeDefined();
+ expect(nonStreamingErrorSpan!.name).toBe('chat error-model');
+ expect(nonStreamingErrorSpan!.status).toBe('error');
+ expect(nonStreamingErrorSpan!.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE]).toBeUndefined();
+ expect(nonStreamingErrorSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(nonStreamingErrorSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(nonStreamingErrorSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(nonStreamingErrorSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(nonStreamingErrorSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'error-model',
+ });
+
+ const streamingChatCompletionSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'chatcmpl-stream-123',
+ );
+ expect(streamingChatCompletionSpan).toBeDefined();
+ expect(streamingChatCompletionSpan!.name).toBe('chat gpt-4');
+ expect(streamingChatCompletionSpan!.status).toBe('ok');
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(streamingChatCompletionSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(streamingChatCompletionSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-4',
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]).toEqual({
+ type: 'double',
+ value: 0.8,
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE]).toEqual({
+ type: 'boolean',
+ value: true,
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-4',
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chatcmpl-stream-123',
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '["stop"]',
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 12,
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 18,
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 30,
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]).toEqual({
+ type: 'boolean',
+ value: true,
+ });
+
+ const streamingResponsesSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'resp_stream_456',
+ );
+ expect(streamingResponsesSpan).toBeDefined();
+ expect(streamingResponsesSpan!.name).toBe('chat gpt-4');
+ expect(streamingResponsesSpan!.status).toBe('ok');
+ expect(streamingResponsesSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(streamingResponsesSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(streamingResponsesSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-4',
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE]).toEqual({
+ type: 'boolean',
+ value: true,
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-4',
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'resp_stream_456',
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '["in_progress","completed"]',
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 6,
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 10,
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 16,
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]).toEqual({
+ type: 'boolean',
+ value: true,
+ });
+
+ const streamingErrorSpan = container.items.find(
+ span =>
+ span.name === 'chat error-model' && span.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE]?.value === true,
+ );
+ expect(streamingErrorSpan).toBeDefined();
+ expect(streamingErrorSpan!.name).toBe('chat error-model');
+ expect(streamingErrorSpan!.status).toBe('error');
+ expect(streamingErrorSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(streamingErrorSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'error-model',
+ });
+ expect(streamingErrorSpan!.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE]).toEqual({
+ type: 'boolean',
+ value: true,
+ });
+ expect(streamingErrorSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(streamingErrorSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(streamingErrorSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ },
+ })
.start()
.completed();
});
@@ -329,7 +333,369 @@ describe('OpenAI integration', () => {
test('creates openai related spans with sendDefaultPii: true', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_TRUE_CHAT })
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(6);
+ const chatCompletionSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'chatcmpl-mock123',
+ );
+ expect(chatCompletionSpan).toBeDefined();
+ expect(chatCompletionSpan!.name).toBe('chat gpt-3.5-turbo');
+ expect(chatCompletionSpan!.status).toBe('ok');
+ expect(chatCompletionSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(chatCompletionSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(chatCompletionSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-3.5-turbo',
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]).toEqual({
+ type: 'double',
+ value: 0.7,
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 1,
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '[{"role":"user","content":"What is the capital of France?"}]',
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: JSON.stringify([{ type: 'text', content: 'You are a helpful assistant.' }]),
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-3.5-turbo',
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chatcmpl-mock123',
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '["stop"]',
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '["Hello from OpenAI mock!"]',
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 10,
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 15,
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 25,
+ });
+
+ const responsesSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'resp_mock456',
+ );
+ expect(responsesSpan).toBeDefined();
+ expect(responsesSpan!.name).toBe('chat gpt-3.5-turbo');
+ expect(responsesSpan!.status).toBe('ok');
+ expect(responsesSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(responsesSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(responsesSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(responsesSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({ type: 'string', value: 'openai' });
+ expect(responsesSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-3.5-turbo',
+ });
+ expect(responsesSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 1,
+ });
+ expect(responsesSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'Translate this to French: Hello',
+ });
+ expect(responsesSpan!.attributes[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'Response to: Translate this to French: Hello',
+ });
+ expect(responsesSpan!.attributes[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '["completed"]',
+ });
+ expect(responsesSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-3.5-turbo',
+ });
+ expect(responsesSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'resp_mock456',
+ });
+ expect(responsesSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 5,
+ });
+ expect(responsesSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 8,
+ });
+ expect(responsesSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 13,
+ });
+
+ const nonStreamingErrorSpan = container.items.find(
+ span =>
+ span.name === 'chat error-model' && span.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE] === undefined,
+ );
+ expect(nonStreamingErrorSpan).toBeDefined();
+ expect(nonStreamingErrorSpan!.name).toBe('chat error-model');
+ expect(nonStreamingErrorSpan!.status).toBe('error');
+ expect(nonStreamingErrorSpan!.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE]).toBeUndefined();
+ expect(nonStreamingErrorSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(nonStreamingErrorSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(nonStreamingErrorSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(nonStreamingErrorSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(nonStreamingErrorSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'error-model',
+ });
+ expect(nonStreamingErrorSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 1,
+ });
+ expect(nonStreamingErrorSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '[{"role":"user","content":"This will fail"}]',
+ });
+
+ const streamingChatCompletionSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'chatcmpl-stream-123',
+ );
+ expect(streamingChatCompletionSpan).toBeDefined();
+ expect(streamingChatCompletionSpan!.name).toBe('chat gpt-4');
+ expect(streamingChatCompletionSpan!.status).toBe('ok');
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(streamingChatCompletionSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(streamingChatCompletionSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-4',
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]).toEqual({
+ type: 'double',
+ value: 0.8,
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE]).toEqual({
+ type: 'boolean',
+ value: true,
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 1,
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '[{"role":"user","content":"Tell me about streaming"}]',
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: JSON.stringify([{ type: 'text', content: 'You are a helpful assistant.' }]),
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'Hello from OpenAI streaming!',
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '["stop"]',
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chatcmpl-stream-123',
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-4',
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 12,
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 18,
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 30,
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]).toEqual({
+ type: 'boolean',
+ value: true,
+ });
+
+ const streamingResponsesSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'resp_stream_456',
+ );
+ expect(streamingResponsesSpan).toBeDefined();
+ expect(streamingResponsesSpan!.name).toBe('chat gpt-4');
+ expect(streamingResponsesSpan!.status).toBe('ok');
+ expect(streamingResponsesSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(streamingResponsesSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(streamingResponsesSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-4',
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE]).toEqual({
+ type: 'boolean',
+ value: true,
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 1,
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'Test streaming responses API',
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'Streaming response to: Test streaming responses APITest streaming responses API',
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '["in_progress","completed"]',
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'resp_stream_456',
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-4',
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 6,
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 10,
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 16,
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]).toEqual({
+ type: 'boolean',
+ value: true,
+ });
+
+ const streamingErrorSpan = container.items.find(
+ span =>
+ span.name === 'chat error-model' && span.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE]?.value === true,
+ );
+ expect(streamingErrorSpan).toBeDefined();
+ expect(streamingErrorSpan!.name).toBe('chat error-model');
+ expect(streamingErrorSpan!.status).toBe('error');
+ expect(streamingErrorSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(streamingErrorSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'error-model',
+ });
+ expect(streamingErrorSpan!.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE]).toEqual({
+ type: 'boolean',
+ value: true,
+ });
+ expect(streamingErrorSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 1,
+ });
+ expect(streamingErrorSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '[{"role":"user","content":"This will fail"}]',
+ });
+ expect(streamingErrorSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(streamingErrorSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(streamingErrorSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ },
+ })
.start()
.completed();
});
@@ -339,7 +705,42 @@ describe('OpenAI integration', () => {
test('creates openai related spans with custom options', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_WITH_OPTIONS })
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(6);
+ const chatCompletionSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'chatcmpl-mock123',
+ );
+ expect(chatCompletionSpan).toBeDefined();
+ expect(chatCompletionSpan!.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE]).toBeUndefined();
+ expect(chatCompletionSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toMatchObject({
+ type: 'string',
+ value: expect.any(String),
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]).toMatchObject({
+ type: 'string',
+ value: expect.any(String),
+ });
+
+ const streamingChatCompletionSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'chatcmpl-stream-123',
+ );
+ expect(streamingChatCompletionSpan).toBeDefined();
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE]).toEqual({
+ type: 'boolean',
+ value: true,
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toMatchObject({
+ type: 'string',
+ value: expect.any(String),
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]).toMatchObject({
+ type: 'string',
+ value: expect.any(String),
+ });
+ },
+ })
.start()
.completed();
});
@@ -347,30 +748,6 @@ describe('OpenAI integration', () => {
const longContent = 'A'.repeat(50_000);
- const EXPECTED_TRANSACTION_NO_TRUNCATION = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // Multiple messages should all be preserved (no popping to last message only)
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: JSON.stringify([
- { role: 'user', content: longContent },
- { role: 'assistant', content: 'Some reply' },
- { role: 'user', content: 'Follow-up question' },
- ]),
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 3,
- }),
- }),
- // Responses API long string input should not be truncated or wrapped in quotes
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: 'B'.repeat(50_000),
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- }),
- }),
- ]),
- };
-
createEsmAndCjsTests(
__dirname,
'scenario-no-truncation.mjs',
@@ -379,115 +756,187 @@ describe('OpenAI integration', () => {
test('does not truncate input messages when enableTruncation is false', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_NO_TRUNCATION })
+ .expect({
+ transaction: {
+ transaction: 'main',
+ },
+ })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(2);
+ const chatCompletionSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'chatcmpl-mock123',
+ );
+ expect(chatCompletionSpan).toBeDefined();
+ expect(chatCompletionSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chatcmpl-mock123',
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toMatchObject({
+ type: 'string',
+ value: JSON.stringify([
+ { role: 'user', content: longContent },
+ { role: 'assistant', content: 'Some reply' },
+ { role: 'user', content: 'Follow-up question' },
+ ]),
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]).toMatchObject({
+ type: 'integer',
+ value: 3,
+ });
+
+ const responsesSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'resp_mock456',
+ );
+ expect(responsesSpan).toBeDefined();
+ expect(responsesSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'resp_mock456',
+ });
+ expect(responsesSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toMatchObject({
+ type: 'string',
+ value: 'B'.repeat(50_000),
+ });
+ expect(responsesSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]).toMatchObject({
+ type: 'integer',
+ value: 1,
+ });
+ },
+ })
.start()
.completed();
});
},
);
- const EXPECTED_TRANSACTION_DEFAULT_PII_FALSE_EMBEDDINGS = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // First span - embeddings API
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'text-embedding-3-small',
- [GEN_AI_REQUEST_ENCODING_FORMAT_ATTRIBUTE]: 'float',
- [GEN_AI_REQUEST_DIMENSIONS_ATTRIBUTE]: 1536,
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 10,
- },
- description: 'embeddings text-embedding-3-small',
- op: 'gen_ai.embeddings',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- // Second span - embeddings API error model
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
- },
- description: 'embeddings error-model',
- op: 'gen_ai.embeddings',
- origin: 'auto.ai.openai',
- status: 'internal_error',
- }),
- ]),
- };
-
- const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE_EMBEDDINGS = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // First span - embeddings API with PII
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'text-embedding-3-small',
- [GEN_AI_REQUEST_ENCODING_FORMAT_ATTRIBUTE]: 'float',
- [GEN_AI_REQUEST_DIMENSIONS_ATTRIBUTE]: 1536,
- [GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]: 'Embedding test!',
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 10,
- },
- description: 'embeddings text-embedding-3-small',
- op: 'gen_ai.embeddings',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- // Second span - embeddings API error model with PII
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
- [GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]: 'Error embedding test!',
- },
- description: 'embeddings error-model',
- op: 'gen_ai.embeddings',
- origin: 'auto.ai.openai',
- status: 'internal_error',
- }),
- // Third span - embeddings API with multiple inputs (this does not get truncated)
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'text-embedding-3-small',
- [GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]: '["First input text","Second input text","Third input text"]',
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 10,
- },
- description: 'embeddings text-embedding-3-small',
- op: 'gen_ai.embeddings',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- ]),
- };
createEsmAndCjsTests(__dirname, 'scenario-embeddings.mjs', 'instrument.mjs', (createRunner, test) => {
test('creates openai related spans with sendDefaultPii: false', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_FALSE_EMBEDDINGS })
+ .expect({
+ transaction: {
+ transaction: 'main',
+ },
+ })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(3);
+ const singleEmbeddingSpan = container.items.find(
+ span =>
+ span.name === 'embeddings text-embedding-3-small' &&
+ span.attributes[GEN_AI_REQUEST_DIMENSIONS_ATTRIBUTE] !== undefined,
+ );
+ expect(singleEmbeddingSpan).toBeDefined();
+ expect(singleEmbeddingSpan!.name).toBe('embeddings text-embedding-3-small');
+ expect(singleEmbeddingSpan!.status).toBe('ok');
+ expect(singleEmbeddingSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'embeddings',
+ });
+ expect(singleEmbeddingSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.embeddings',
+ });
+ expect(singleEmbeddingSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(singleEmbeddingSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(singleEmbeddingSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'text-embedding-3-small',
+ });
+ expect(singleEmbeddingSpan!.attributes[GEN_AI_REQUEST_ENCODING_FORMAT_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'float',
+ });
+ expect(singleEmbeddingSpan!.attributes[GEN_AI_REQUEST_DIMENSIONS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 1536,
+ });
+ expect(singleEmbeddingSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'text-embedding-3-small',
+ });
+ expect(singleEmbeddingSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 10,
+ });
+ expect(singleEmbeddingSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 10,
+ });
+
+ const errorEmbeddingSpan = container.items.find(span => span.name === 'embeddings error-model');
+ expect(errorEmbeddingSpan).toBeDefined();
+ expect(errorEmbeddingSpan!.name).toBe('embeddings error-model');
+ expect(errorEmbeddingSpan!.status).toBe('error');
+ expect(errorEmbeddingSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'embeddings',
+ });
+ expect(errorEmbeddingSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.embeddings',
+ });
+ expect(errorEmbeddingSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(errorEmbeddingSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(errorEmbeddingSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'error-model',
+ });
+
+ const multiEmbeddingSpan = container.items.find(
+ span =>
+ span.name === 'embeddings text-embedding-3-small' &&
+ span.attributes[GEN_AI_REQUEST_DIMENSIONS_ATTRIBUTE] === undefined,
+ );
+ expect(multiEmbeddingSpan).toBeDefined();
+ expect(multiEmbeddingSpan!.name).toBe('embeddings text-embedding-3-small');
+ expect(multiEmbeddingSpan!.status).toBe('ok');
+ expect(multiEmbeddingSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'embeddings',
+ });
+ expect(multiEmbeddingSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.embeddings',
+ });
+ expect(multiEmbeddingSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(multiEmbeddingSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(multiEmbeddingSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'text-embedding-3-small',
+ });
+ expect(multiEmbeddingSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'text-embedding-3-small',
+ });
+ expect(multiEmbeddingSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 10,
+ });
+ expect(multiEmbeddingSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 10,
+ });
+ },
+ })
.start()
.completed();
});
@@ -497,7 +946,142 @@ describe('OpenAI integration', () => {
test('creates openai related spans with sendDefaultPii: true', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_TRUE_EMBEDDINGS })
+ .expect({
+ transaction: {
+ transaction: 'main',
+ },
+ })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(3);
+ const singleEmbeddingSpan = container.items.find(
+ span => span.attributes[GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]?.value === 'Embedding test!',
+ );
+ expect(singleEmbeddingSpan).toBeDefined();
+ expect(singleEmbeddingSpan!.name).toBe('embeddings text-embedding-3-small');
+ expect(singleEmbeddingSpan!.status).toBe('ok');
+ expect(singleEmbeddingSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'embeddings',
+ });
+ expect(singleEmbeddingSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.embeddings',
+ });
+ expect(singleEmbeddingSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(singleEmbeddingSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(singleEmbeddingSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'text-embedding-3-small',
+ });
+ expect(singleEmbeddingSpan!.attributes[GEN_AI_REQUEST_ENCODING_FORMAT_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'float',
+ });
+ expect(singleEmbeddingSpan!.attributes[GEN_AI_REQUEST_DIMENSIONS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 1536,
+ });
+ expect(singleEmbeddingSpan!.attributes[GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'Embedding test!',
+ });
+ expect(singleEmbeddingSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'text-embedding-3-small',
+ });
+ expect(singleEmbeddingSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 10,
+ });
+ expect(singleEmbeddingSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 10,
+ });
+
+ const errorEmbeddingSpan = container.items.find(
+ span => span.attributes[GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]?.value === 'Error embedding test!',
+ );
+ expect(errorEmbeddingSpan).toBeDefined();
+ expect(errorEmbeddingSpan!.name).toBe('embeddings error-model');
+ expect(errorEmbeddingSpan!.status).toBe('error');
+ expect(errorEmbeddingSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'embeddings',
+ });
+ expect(errorEmbeddingSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.embeddings',
+ });
+ expect(errorEmbeddingSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(errorEmbeddingSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(errorEmbeddingSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'error-model',
+ });
+ expect(errorEmbeddingSpan!.attributes[GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'Error embedding test!',
+ });
+
+ const multiEmbeddingSpan = container.items.find(
+ span =>
+ span.attributes[GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]?.value ===
+ '["First input text","Second input text","Third input text"]',
+ );
+ expect(multiEmbeddingSpan).toBeDefined();
+ expect(multiEmbeddingSpan!.name).toBe('embeddings text-embedding-3-small');
+ expect(multiEmbeddingSpan!.status).toBe('ok');
+ expect(multiEmbeddingSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'embeddings',
+ });
+ expect(multiEmbeddingSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.embeddings',
+ });
+ expect(multiEmbeddingSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(multiEmbeddingSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(multiEmbeddingSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'text-embedding-3-small',
+ });
+ expect(multiEmbeddingSpan!.attributes[GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '["First input text","Second input text","Third input text"]',
+ });
+ expect(multiEmbeddingSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'text-embedding-3-small',
+ });
+ expect(multiEmbeddingSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 10,
+ });
+ expect(multiEmbeddingSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 10,
+ });
+ },
+ })
.start()
.completed();
});
@@ -598,50 +1182,91 @@ describe('OpenAI integration', () => {
.expect({
transaction: {
transaction: 'main',
- spans: expect.arrayContaining([
- // First call: Last message is large and gets truncated (only C's remain, D's are cropped)
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
- // Messages should be present (truncation happened) and should be a JSON array of a single index
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[\{"role":"user","content":"C+"\}\]$/),
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 2,
- [GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE]: expect.stringMatching(
- /^\[\{"type":"text","content":"A+"\}\]$/,
- ),
- }),
- description: 'chat gpt-3.5-turbo',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- // Second call: Last message is small and kept without truncation
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
- // Small message should be kept intact
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: JSON.stringify([
- { role: 'user', content: 'This is a small message that fits within the limit' },
- ]),
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 2,
- [GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE]: expect.stringMatching(
- /^\[\{"type":"text","content":"A+"\}\]$/,
- ),
- }),
- description: 'chat gpt-3.5-turbo',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- ]),
+ },
+ })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(2);
+ const truncatedMessageSpan = container.items.find(span =>
+ span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]?.value?.match(
+ /^\[\{"role":"user","content":"C+"\}\]$/,
+ ),
+ );
+ expect(truncatedMessageSpan).toBeDefined();
+ expect(truncatedMessageSpan!.name).toBe('chat gpt-3.5-turbo');
+ expect(truncatedMessageSpan!.status).toBe('ok');
+ expect(truncatedMessageSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(truncatedMessageSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(truncatedMessageSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(truncatedMessageSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(truncatedMessageSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-3.5-turbo',
+ });
+ expect(truncatedMessageSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 2,
+ });
+ expect(truncatedMessageSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE].value).toMatch(
+ /^\[\{"role":"user","content":"C+"\}\]$/,
+ );
+ expect(truncatedMessageSpan!.attributes[GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE].value).toMatch(
+ /^\[\{"type":"text","content":"A+"\}\]$/,
+ );
+
+ const smallMessageSpan = container.items.find(
+ span =>
+ span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]?.value ===
+ JSON.stringify([{ role: 'user', content: 'This is a small message that fits within the limit' }]),
+ );
+ expect(smallMessageSpan).toBeDefined();
+ expect(smallMessageSpan!.name).toBe('chat gpt-3.5-turbo');
+ expect(smallMessageSpan!.status).toBe('ok');
+ expect(smallMessageSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(smallMessageSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(smallMessageSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(smallMessageSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(smallMessageSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-3.5-turbo',
+ });
+ expect(smallMessageSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: JSON.stringify([
+ { role: 'user', content: 'This is a small message that fits within the limit' },
+ ]),
+ });
+ expect(smallMessageSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 2,
+ });
+ expect(smallMessageSpan!.attributes[GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE].value).toMatch(
+ /^\[\{"type":"text","content":"A+"\}\]$/,
+ );
},
})
.start()
@@ -661,24 +1286,35 @@ describe('OpenAI integration', () => {
.expect({
transaction: {
transaction: 'main',
- spans: expect.arrayContaining([
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
- // Messages should be present and should include truncated string input (contains only As)
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^A+$/),
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- }),
- description: 'chat gpt-3.5-turbo',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- ]),
+ },
+ })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(1);
+ const [firstSpan] = container.items;
+
+ // [0] long A-string input is truncated
+ expect(firstSpan!.name).toBe('chat gpt-3.5-turbo');
+ expect(firstSpan!.status).toBe('ok');
+ expect(firstSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({ type: 'string', value: 'chat' });
+ expect(firstSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(firstSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(firstSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({ type: 'string', value: 'openai' });
+ expect(firstSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-3.5-turbo',
+ });
+ expect(firstSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 1,
+ });
+ expect(firstSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE].value).toMatch(/^A+$/);
},
})
.start()
@@ -688,201 +1324,186 @@ describe('OpenAI integration', () => {
);
// Test for conversation ID support (Conversations API and previous_response_id)
- const EXPECTED_TRANSACTION_CONVERSATION = {
- transaction: 'conversation-test',
- spans: expect.arrayContaining([
- // First span - conversations.create returns conversation object with id
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- // The conversation ID should be captured from the response
- [GEN_AI_CONVERSATION_ID_ATTRIBUTE]: 'conv_689667905b048191b4740501625afd940c7533ace33a2dab',
- }),
- description: 'chat unknown',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- // Second span - responses.create with conversation parameter
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4',
- // The conversation ID should be captured from the request
- [GEN_AI_CONVERSATION_ID_ATTRIBUTE]: 'conv_689667905b048191b4740501625afd940c7533ace33a2dab',
- }),
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- // Third span - responses.create without conversation (first in chain, should NOT have gen_ai.conversation.id)
- expect.objectContaining({
- data: expect.not.objectContaining({
- [GEN_AI_CONVERSATION_ID_ATTRIBUTE]: expect.anything(),
- }),
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- // Fourth span - responses.create with previous_response_id (chaining)
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4',
- // The previous_response_id should be captured as conversation.id
- [GEN_AI_CONVERSATION_ID_ATTRIBUTE]: 'resp_mock_conv_123',
- }),
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- ]),
- };
-
createEsmAndCjsTests(__dirname, 'scenario-conversation.mjs', 'instrument.mjs', (createRunner, test) => {
test('captures conversation ID from Conversations API and previous_response_id', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_CONVERSATION })
+ .expect({
+ transaction: {
+ transaction: 'conversation-test',
+ },
+ })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(4);
+ const conversationCreateSpan = container.items.find(span => span.name === 'chat unknown');
+ expect(conversationCreateSpan).toBeDefined();
+ expect(conversationCreateSpan!.name).toBe('chat unknown');
+ expect(conversationCreateSpan!.status).toBe('ok');
+ expect(conversationCreateSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(conversationCreateSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(conversationCreateSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(conversationCreateSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(conversationCreateSpan!.attributes[GEN_AI_CONVERSATION_ID_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'conv_689667905b048191b4740501625afd940c7533ace33a2dab',
+ });
+
+ const conversationResponseSpan = container.items.find(
+ span =>
+ span.attributes[GEN_AI_CONVERSATION_ID_ATTRIBUTE]?.value ===
+ 'conv_689667905b048191b4740501625afd940c7533ace33a2dab' &&
+ span.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]?.value === 'gpt-4',
+ );
+ expect(conversationResponseSpan).toBeDefined();
+ expect(conversationResponseSpan!.status).toBe('ok');
+ expect(conversationResponseSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(conversationResponseSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(conversationResponseSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(conversationResponseSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(conversationResponseSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-4',
+ });
+ expect(conversationResponseSpan!.attributes[GEN_AI_CONVERSATION_ID_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'conv_689667905b048191b4740501625afd940c7533ace33a2dab',
+ });
+
+ const unlinkedResponseSpan = container.items.find(
+ span =>
+ span.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]?.value === 'gen_ai.chat' &&
+ span.attributes[GEN_AI_CONVERSATION_ID_ATTRIBUTE] === undefined,
+ );
+ expect(unlinkedResponseSpan).toBeDefined();
+ expect(unlinkedResponseSpan!.status).toBe('ok');
+ expect(unlinkedResponseSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(unlinkedResponseSpan!.attributes[GEN_AI_CONVERSATION_ID_ATTRIBUTE]).toBeUndefined();
+
+ const previousResponseSpan = container.items.find(
+ span => span.attributes[GEN_AI_CONVERSATION_ID_ATTRIBUTE]?.value === 'resp_mock_conv_123',
+ );
+ expect(previousResponseSpan).toBeDefined();
+ expect(previousResponseSpan!.status).toBe('ok');
+ expect(previousResponseSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(previousResponseSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(previousResponseSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(previousResponseSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(previousResponseSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-4',
+ });
+ expect(previousResponseSpan!.attributes[GEN_AI_CONVERSATION_ID_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'resp_mock_conv_123',
+ });
+ },
+ })
.start()
.completed();
});
});
// Test for manual conversation ID setting using setConversationId()
- const EXPECTED_TRANSACTION_MANUAL_CONVERSATION_ID = {
- transaction: 'chat-with-manual-conversation-id',
- spans: expect.arrayContaining([
- // All three chat completion spans should have the same manually-set conversation ID
- expect.objectContaining({
- data: expect.objectContaining({
- 'gen_ai.conversation.id': 'user_chat_session_abc123',
- 'gen_ai.system': 'openai',
- 'gen_ai.request.model': 'gpt-4',
- 'gen_ai.operation.name': 'chat',
- 'sentry.op': 'gen_ai.chat',
- }),
- description: 'chat gpt-4',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- expect.objectContaining({
- data: expect.objectContaining({
- 'gen_ai.conversation.id': 'user_chat_session_abc123',
- 'gen_ai.system': 'openai',
- 'gen_ai.request.model': 'gpt-4',
- 'gen_ai.operation.name': 'chat',
- 'sentry.op': 'gen_ai.chat',
- }),
- description: 'chat gpt-4',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- expect.objectContaining({
- data: expect.objectContaining({
- 'gen_ai.conversation.id': 'user_chat_session_abc123',
- 'gen_ai.system': 'openai',
- 'gen_ai.request.model': 'gpt-4',
- 'gen_ai.operation.name': 'chat',
- 'sentry.op': 'gen_ai.chat',
- }),
- description: 'chat gpt-4',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- ]),
- };
-
createEsmAndCjsTests(__dirname, 'scenario-manual-conversation-id.mjs', 'instrument.mjs', (createRunner, test) => {
test('attaches manual conversation ID set via setConversationId() to all chat spans', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_MANUAL_CONVERSATION_ID })
+ .expect({
+ transaction: {
+ transaction: 'chat-with-manual-conversation-id',
+ },
+ })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(3);
+
+ // All three chat completion spans should have the same manually-set conversation ID
+ for (const span of container.items) {
+ expect(span!.name).toBe('chat gpt-4');
+ expect(span!.status).toBe('ok');
+ expect(span!.attributes['gen_ai.conversation.id']).toEqual({
+ type: 'string',
+ value: 'user_chat_session_abc123',
+ });
+ expect(span!.attributes['gen_ai.system']).toEqual({ type: 'string', value: 'openai' });
+ expect(span!.attributes['gen_ai.request.model']).toEqual({ type: 'string', value: 'gpt-4' });
+ expect(span!.attributes['gen_ai.operation.name']).toEqual({ type: 'string', value: 'chat' });
+ expect(span!.attributes['sentry.op']).toEqual({ type: 'string', value: 'gen_ai.chat' });
+ }
+ },
+ })
.start()
.completed();
});
});
- // Test for scope isolation - different scopes have different conversation IDs
- const EXPECTED_TRANSACTION_CONVERSATION_1 = {
- transaction: 'GET /chat/conversation-1',
- spans: expect.arrayContaining([
- // Both chat completion spans in conversation 1 should have conv_user1_session_abc
- expect.objectContaining({
- data: expect.objectContaining({
- 'gen_ai.conversation.id': 'conv_user1_session_abc',
- 'gen_ai.system': 'openai',
- 'gen_ai.request.model': 'gpt-4',
- 'sentry.op': 'gen_ai.chat',
- }),
- description: 'chat gpt-4',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- expect.objectContaining({
- data: expect.objectContaining({
- 'gen_ai.conversation.id': 'conv_user1_session_abc',
- 'gen_ai.system': 'openai',
- 'gen_ai.request.model': 'gpt-4',
- 'sentry.op': 'gen_ai.chat',
- }),
- description: 'chat gpt-4',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- ]),
- };
-
- const EXPECTED_TRANSACTION_CONVERSATION_2 = {
- transaction: 'GET /chat/conversation-2',
- spans: expect.arrayContaining([
- // Both chat completion spans in conversation 2 should have conv_user2_session_xyz
- expect.objectContaining({
- data: expect.objectContaining({
- 'gen_ai.conversation.id': 'conv_user2_session_xyz',
- 'gen_ai.system': 'openai',
- 'gen_ai.request.model': 'gpt-4',
- 'sentry.op': 'gen_ai.chat',
- }),
- description: 'chat gpt-4',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- expect.objectContaining({
- data: expect.objectContaining({
- 'gen_ai.conversation.id': 'conv_user2_session_xyz',
- 'gen_ai.system': 'openai',
- 'gen_ai.request.model': 'gpt-4',
- 'sentry.op': 'gen_ai.chat',
- }),
- description: 'chat gpt-4',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- ]),
- };
-
createEsmAndCjsTests(__dirname, 'scenario-separate-scope-1.mjs', 'instrument.mjs', (createRunner, test) => {
test('isolates conversation IDs across separate scopes - conversation 1', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_CONVERSATION_1 })
+ .expect({
+ transaction: {
+ transaction: 'GET /chat/conversation-1',
+ },
+ })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(2);
+
+ // Both chat completion spans should have the expected conversation ID
+ for (const span of container.items) {
+ expect(span!.name).toBe('chat gpt-4');
+ expect(span!.status).toBe('ok');
+ expect(span!.attributes['gen_ai.conversation.id']).toEqual({
+ type: 'string',
+ value: 'conv_user1_session_abc',
+ });
+ expect(span!.attributes['gen_ai.system']).toEqual({ type: 'string', value: 'openai' });
+ expect(span!.attributes['gen_ai.request.model']).toEqual({ type: 'string', value: 'gpt-4' });
+ expect(span!.attributes['sentry.op']).toEqual({ type: 'string', value: 'gen_ai.chat' });
+ }
+ },
+ })
.start()
.completed();
});
@@ -892,7 +1513,29 @@ describe('OpenAI integration', () => {
test('isolates conversation IDs across separate scopes - conversation 2', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_CONVERSATION_2 })
+ .expect({
+ transaction: {
+ transaction: 'GET /chat/conversation-2',
+ },
+ })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(2);
+
+ // Both chat completion spans should have the expected conversation ID
+ for (const span of container.items) {
+ expect(span!.name).toBe('chat gpt-4');
+ expect(span!.status).toBe('ok');
+ expect(span!.attributes['gen_ai.conversation.id']).toEqual({
+ type: 'string',
+ value: 'conv_user2_session_xyz',
+ });
+ expect(span!.attributes['gen_ai.system']).toEqual({ type: 'string', value: 'openai' });
+ expect(span!.attributes['gen_ai.request.model']).toEqual({ type: 'string', value: 'gpt-4' });
+ expect(span!.attributes['sentry.op']).toEqual({ type: 'string', value: 'gen_ai.chat' });
+ }
+ },
+ })
.start()
.completed();
});
@@ -909,15 +1552,18 @@ describe('OpenAI integration', () => {
.expect({
transaction: {
transaction: 'main',
- spans: expect.arrayContaining([
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE]: JSON.stringify([
- { type: 'text', content: 'You are a helpful assistant' },
- ]),
- }),
- }),
- ]),
+ },
+ })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(1);
+ const [firstSpan] = container.items;
+
+ // [0] chat completion with system instructions extracted from messages
+ expect(firstSpan!.attributes[GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: JSON.stringify([{ type: 'text', content: 'You are a helpful assistant' }]),
+ });
},
})
.start()
@@ -933,30 +1579,23 @@ describe('OpenAI integration', () => {
.expect({
transaction: {
transaction: 'main',
- spans: expect.arrayContaining([
- // First call using .withResponse()
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-withresponse',
- }),
- description: 'chat gpt-4',
- op: 'gen_ai.chat',
- status: 'ok',
- }),
- // Second call using .asResponse()
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-withresponse',
- }),
- description: 'chat gpt-4',
- op: 'gen_ai.chat',
- status: 'ok',
- }),
- ]),
+ },
+ })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(2);
+
+ // Both calls should produce spans with the same response ID
+ for (const span of container.items) {
+ expect(span!.name).toBe('chat gpt-4');
+ expect(span!.status).toBe('ok');
+ expect(span!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({ type: 'string', value: 'chat' });
+ expect(span!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({ type: 'string', value: 'gpt-4' });
+ expect(span!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chatcmpl-withresponse',
+ });
+ }
},
})
.start()
@@ -971,32 +1610,24 @@ describe('OpenAI integration', () => {
.expect({
transaction: {
transaction: 'main',
- spans: expect.arrayContaining([
- // Single image vision request
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4o',
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringContaining('[Blob substitute]'),
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- }),
- description: 'chat gpt-4o',
- op: 'gen_ai.chat',
- status: 'ok',
- }),
- // Multiple images vision request
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4o',
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringContaining('[Blob substitute]'),
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- }),
- description: 'chat gpt-4o',
- op: 'gen_ai.chat',
- status: 'ok',
- }),
- ]),
+ },
+ })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(2);
+
+ // Both vision request spans should contain [Blob substitute]
+ for (const span of container.items) {
+ expect(span!.name).toBe('chat gpt-4o');
+ expect(span!.status).toBe('ok');
+ expect(span!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({ type: 'string', value: 'chat' });
+ expect(span!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({ type: 'string', value: 'gpt-4o' });
+ expect(span!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 1,
+ });
+ expect(span!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE].value).toContain('[Blob substitute]');
+ }
},
})
.start()
@@ -1009,14 +1640,18 @@ describe('OpenAI integration', () => {
.expect({
transaction: {
transaction: 'main',
- spans: expect.arrayContaining([
- // The second span (multiple images) should still contain the https URL
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringContaining('https://example.com/image.png'),
- }),
- }),
- ]),
+ },
+ })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(2);
+ const multipleImagesSpan = container.items.find(span =>
+ span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]?.value?.includes('https://example.com/image.png'),
+ );
+ expect(multipleImagesSpan).toBeDefined();
+ expect(multipleImagesSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE].value).toContain(
+ 'https://example.com/image.png',
+ );
},
})
.start()
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument-root-span.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument-root-span.mjs
index f3fbac9d1274..4fad2b24a6d6 100644
--- a/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument-root-span.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument-root-span.mjs
@@ -8,4 +8,5 @@ Sentry.init({
sendDefaultPii: false,
transport: loggingTransport,
integrations: [Sentry.openAIIntegration()],
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument-with-options.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument-with-options.mjs
index 51da27f73bbc..4632dcada76a 100644
--- a/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument-with-options.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument-with-options.mjs
@@ -20,4 +20,5 @@ Sentry.init({
}
return event;
},
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument-with-pii.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument-with-pii.mjs
index 74bc63db971b..00a67de0ee35 100644
--- a/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument-with-pii.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument-with-pii.mjs
@@ -14,4 +14,5 @@ Sentry.init({
}
return event;
},
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument.mjs b/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument.mjs
index 1ff3990a0693..ac81315e85e4 100644
--- a/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/v6/instrument.mjs
@@ -14,4 +14,5 @@ Sentry.init({
}
return event;
},
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts b/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts
index b282282305eb..f19302122622 100644
--- a/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts
@@ -28,390 +28,6 @@ describe('OpenAI integration (V6)', () => {
cleanupChildProcesses();
});
- const EXPECTED_TRANSACTION_DEFAULT_PII_FALSE_CHAT = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // First span - basic chat completion without PII
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
- [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
- },
- description: 'chat gpt-3.5-turbo',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- // Second span - responses API
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'resp_mock456',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["completed"]',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 5,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 8,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 13,
- },
- description: 'chat gpt-3.5-turbo',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- // Third span - error handling
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
- },
- description: 'chat error-model',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'internal_error',
- }),
- // Fourth span - chat completions streaming
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4',
- [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8,
- [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-stream-123',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 12,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 18,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
- [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
- },
- description: 'chat gpt-4',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- // Fifth span - responses API streaming
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4',
- [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'resp_stream_456',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["in_progress","completed"]',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 6,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 16,
- [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
- },
- description: 'chat gpt-4',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- // Sixth span - error handling in streaming context
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
- [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- },
- description: 'chat error-model',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'internal_error',
- }),
- ]),
- };
-
- const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE_CHAT = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // First span - basic chat completion with PII
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
- [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.7,
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the capital of France?"}]',
- [GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE]: '[{"type":"text","content":"You are a helpful assistant."}]',
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-mock123',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]',
- [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: '["Hello from OpenAI mock!"]',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 25,
- },
- description: 'chat gpt-3.5-turbo',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- // Second span - responses API with PII
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: 'Translate this to French: Hello',
- [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Response to: Translate this to French: Hello',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["completed"]',
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-3.5-turbo',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'resp_mock456',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 5,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 8,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 13,
- },
- description: 'chat gpt-3.5-turbo',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- // Third span - error handling with PII
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"This will fail"}]',
- },
- description: 'chat error-model',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'internal_error',
- }),
- // Fourth span - chat completions streaming with PII
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4',
- [GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]: 0.8,
- [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Tell me about streaming"}]',
- [GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE]: '[{"type":"text","content":"You are a helpful assistant."}]',
- [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Hello from OpenAI streaming!',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["stop"]',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'chatcmpl-stream-123',
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 12,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 18,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
- [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
- }),
- description: 'chat gpt-4',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- // Fifth span - responses API streaming with PII
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'gpt-4',
- [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: 'Test streaming responses API',
- [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]:
- 'Streaming response to: Test streaming responses APITest streaming responses API',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: '["in_progress","completed"]',
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: 'resp_stream_456',
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'gpt-4',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 6,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 16,
- [GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]: true,
- }),
- description: 'chat gpt-4',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- // Sixth span - error handling in streaming context with PII
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'chat',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
- [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true,
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"This will fail"}]',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.chat',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- },
- description: 'chat error-model',
- op: 'gen_ai.chat',
- origin: 'auto.ai.openai',
- status: 'internal_error',
- }),
- ]),
- };
-
- const EXPECTED_TRANSACTION_WITH_OPTIONS = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // Check that custom options are respected
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true
- [GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE]: expect.any(String), // System instructions should be extracted
- [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text when recordOutputs: true
- }),
- }),
- // Check that custom options are respected for streaming
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String), // Should include messages when recordInputs: true
- [GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE]: expect.any(String), // System instructions should be extracted
- [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String), // Should include response text when recordOutputs: true
- [GEN_AI_REQUEST_STREAM_ATTRIBUTE]: true, // Should be marked as stream
- }),
- }),
- ]),
- };
-
- const EXPECTED_TRANSACTION_DEFAULT_PII_FALSE_EMBEDDINGS = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // First span - embeddings API
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'text-embedding-3-small',
- [GEN_AI_REQUEST_ENCODING_FORMAT_ATTRIBUTE]: 'float',
- [GEN_AI_REQUEST_DIMENSIONS_ATTRIBUTE]: 1536,
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 10,
- },
- description: 'embeddings text-embedding-3-small',
- op: 'gen_ai.embeddings',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- // Second span - embeddings API error model
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
- },
- description: 'embeddings error-model',
- op: 'gen_ai.embeddings',
- origin: 'auto.ai.openai',
- status: 'internal_error',
- }),
- ]),
- };
-
- const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE_EMBEDDINGS = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // First span - embeddings API with PII
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'text-embedding-3-small',
- [GEN_AI_REQUEST_ENCODING_FORMAT_ATTRIBUTE]: 'float',
- [GEN_AI_REQUEST_DIMENSIONS_ATTRIBUTE]: 1536,
- [GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]: 'Embedding test!',
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 10,
- },
- description: 'embeddings text-embedding-3-small',
- op: 'gen_ai.embeddings',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- // Second span - embeddings API error model with PII
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'error-model',
- [GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]: 'Error embedding test!',
- },
- description: 'embeddings error-model',
- op: 'gen_ai.embeddings',
- origin: 'auto.ai.openai',
- status: 'internal_error',
- }),
- // Third span - embeddings API with multiple inputs (this does not get truncated)
- expect.objectContaining({
- data: {
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.openai',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'openai',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'text-embedding-3-small',
- [GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]: '["First input text","Second input text","Third input text"]',
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'text-embedding-3-small',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 10,
- },
- description: 'embeddings text-embedding-3-small',
- op: 'gen_ai.embeddings',
- origin: 'auto.ai.openai',
- status: 'ok',
- }),
- ]),
- };
-
createEsmAndCjsTests(
__dirname,
'scenario-chat.mjs',
@@ -420,7 +36,297 @@ describe('OpenAI integration (V6)', () => {
test('creates openai related spans with sendDefaultPii: false (v6)', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_FALSE_CHAT })
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(6);
+ const chatCompletionSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'chatcmpl-mock123',
+ );
+ expect(chatCompletionSpan).toBeDefined();
+ expect(chatCompletionSpan!.name).toBe('chat gpt-3.5-turbo');
+ expect(chatCompletionSpan!.status).toBe('ok');
+ expect(chatCompletionSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(chatCompletionSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(chatCompletionSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-3.5-turbo',
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]).toEqual({
+ type: 'double',
+ value: 0.7,
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-3.5-turbo',
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chatcmpl-mock123',
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '["stop"]',
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 10,
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 15,
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 25,
+ });
+
+ const responsesSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'resp_mock456',
+ );
+ expect(responsesSpan).toBeDefined();
+ expect(responsesSpan!.name).toBe('chat gpt-3.5-turbo');
+ expect(responsesSpan!.status).toBe('ok');
+ expect(responsesSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(responsesSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(responsesSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(responsesSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({ type: 'string', value: 'openai' });
+ expect(responsesSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-3.5-turbo',
+ });
+ expect(responsesSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-3.5-turbo',
+ });
+ expect(responsesSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'resp_mock456',
+ });
+ expect(responsesSpan!.attributes[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '["completed"]',
+ });
+ expect(responsesSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 5,
+ });
+ expect(responsesSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 8,
+ });
+ expect(responsesSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 13,
+ });
+
+ const nonStreamingErrorSpan = container.items.find(
+ span =>
+ span.name === 'chat error-model' && span.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE] === undefined,
+ );
+ expect(nonStreamingErrorSpan).toBeDefined();
+ expect(nonStreamingErrorSpan!.name).toBe('chat error-model');
+ expect(nonStreamingErrorSpan!.status).toBe('error');
+ expect(nonStreamingErrorSpan!.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE]).toBeUndefined();
+ expect(nonStreamingErrorSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(nonStreamingErrorSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(nonStreamingErrorSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(nonStreamingErrorSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(nonStreamingErrorSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'error-model',
+ });
+
+ const streamingChatCompletionSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'chatcmpl-stream-123',
+ );
+ expect(streamingChatCompletionSpan).toBeDefined();
+ expect(streamingChatCompletionSpan!.name).toBe('chat gpt-4');
+ expect(streamingChatCompletionSpan!.status).toBe('ok');
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(streamingChatCompletionSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(streamingChatCompletionSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-4',
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]).toEqual({
+ type: 'double',
+ value: 0.8,
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE]).toEqual({
+ type: 'boolean',
+ value: true,
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-4',
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chatcmpl-stream-123',
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '["stop"]',
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 12,
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 18,
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 30,
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]).toEqual({
+ type: 'boolean',
+ value: true,
+ });
+
+ const streamingResponsesSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'resp_stream_456',
+ );
+ expect(streamingResponsesSpan).toBeDefined();
+ expect(streamingResponsesSpan!.name).toBe('chat gpt-4');
+ expect(streamingResponsesSpan!.status).toBe('ok');
+ expect(streamingResponsesSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(streamingResponsesSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(streamingResponsesSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-4',
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE]).toEqual({
+ type: 'boolean',
+ value: true,
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-4',
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'resp_stream_456',
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '["in_progress","completed"]',
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 6,
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 10,
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 16,
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]).toEqual({
+ type: 'boolean',
+ value: true,
+ });
+
+ const streamingErrorSpan = container.items.find(
+ span =>
+ span.name === 'chat error-model' && span.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE]?.value === true,
+ );
+ expect(streamingErrorSpan).toBeDefined();
+ expect(streamingErrorSpan!.name).toBe('chat error-model');
+ expect(streamingErrorSpan!.status).toBe('error');
+ expect(streamingErrorSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(streamingErrorSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'error-model',
+ });
+ expect(streamingErrorSpan!.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE]).toEqual({
+ type: 'boolean',
+ value: true,
+ });
+ expect(streamingErrorSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(streamingErrorSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(streamingErrorSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ },
+ })
.start()
.completed();
});
@@ -440,7 +346,369 @@ describe('OpenAI integration (V6)', () => {
test('creates openai related spans with sendDefaultPii: true (v6)', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_TRUE_CHAT })
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(6);
+ const chatCompletionSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'chatcmpl-mock123',
+ );
+ expect(chatCompletionSpan).toBeDefined();
+ expect(chatCompletionSpan!.name).toBe('chat gpt-3.5-turbo');
+ expect(chatCompletionSpan!.status).toBe('ok');
+ expect(chatCompletionSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(chatCompletionSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(chatCompletionSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-3.5-turbo',
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]).toEqual({
+ type: 'double',
+ value: 0.7,
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 1,
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '[{"role":"user","content":"What is the capital of France?"}]',
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '[{"type":"text","content":"You are a helpful assistant."}]',
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-3.5-turbo',
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chatcmpl-mock123',
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '["stop"]',
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '["Hello from OpenAI mock!"]',
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 10,
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 15,
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 25,
+ });
+
+ const responsesSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'resp_mock456',
+ );
+ expect(responsesSpan).toBeDefined();
+ expect(responsesSpan!.name).toBe('chat gpt-3.5-turbo');
+ expect(responsesSpan!.status).toBe('ok');
+ expect(responsesSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(responsesSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(responsesSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(responsesSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({ type: 'string', value: 'openai' });
+ expect(responsesSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-3.5-turbo',
+ });
+ expect(responsesSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 1,
+ });
+ expect(responsesSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'Translate this to French: Hello',
+ });
+ expect(responsesSpan!.attributes[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'Response to: Translate this to French: Hello',
+ });
+ expect(responsesSpan!.attributes[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '["completed"]',
+ });
+ expect(responsesSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-3.5-turbo',
+ });
+ expect(responsesSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'resp_mock456',
+ });
+ expect(responsesSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 5,
+ });
+ expect(responsesSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 8,
+ });
+ expect(responsesSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 13,
+ });
+
+ const nonStreamingErrorSpan = container.items.find(
+ span =>
+ span.name === 'chat error-model' && span.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE] === undefined,
+ );
+ expect(nonStreamingErrorSpan).toBeDefined();
+ expect(nonStreamingErrorSpan!.name).toBe('chat error-model');
+ expect(nonStreamingErrorSpan!.status).toBe('error');
+ expect(nonStreamingErrorSpan!.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE]).toBeUndefined();
+ expect(nonStreamingErrorSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(nonStreamingErrorSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(nonStreamingErrorSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(nonStreamingErrorSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(nonStreamingErrorSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'error-model',
+ });
+ expect(nonStreamingErrorSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 1,
+ });
+ expect(nonStreamingErrorSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '[{"role":"user","content":"This will fail"}]',
+ });
+
+ const streamingChatCompletionSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'chatcmpl-stream-123',
+ );
+ expect(streamingChatCompletionSpan).toBeDefined();
+ expect(streamingChatCompletionSpan!.name).toBe('chat gpt-4');
+ expect(streamingChatCompletionSpan!.status).toBe('ok');
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(streamingChatCompletionSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(streamingChatCompletionSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-4',
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE]).toEqual({
+ type: 'double',
+ value: 0.8,
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE]).toEqual({
+ type: 'boolean',
+ value: true,
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 1,
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '[{"role":"user","content":"Tell me about streaming"}]',
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '[{"type":"text","content":"You are a helpful assistant."}]',
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'Hello from OpenAI streaming!',
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '["stop"]',
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chatcmpl-stream-123',
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-4',
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 12,
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 18,
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 30,
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]).toEqual({
+ type: 'boolean',
+ value: true,
+ });
+
+ const streamingResponsesSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'resp_stream_456',
+ );
+ expect(streamingResponsesSpan).toBeDefined();
+ expect(streamingResponsesSpan!.name).toBe('chat gpt-4');
+ expect(streamingResponsesSpan!.status).toBe('ok');
+ expect(streamingResponsesSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(streamingResponsesSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(streamingResponsesSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-4',
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE]).toEqual({
+ type: 'boolean',
+ value: true,
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 1,
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'Test streaming responses API',
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'Streaming response to: Test streaming responses APITest streaming responses API',
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '["in_progress","completed"]',
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'resp_stream_456',
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'gpt-4',
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 6,
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 10,
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 16,
+ });
+ expect(streamingResponsesSpan!.attributes[GEN_AI_RESPONSE_STREAMING_ATTRIBUTE]).toEqual({
+ type: 'boolean',
+ value: true,
+ });
+
+ const streamingErrorSpan = container.items.find(
+ span =>
+ span.name === 'chat error-model' && span.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE]?.value === true,
+ );
+ expect(streamingErrorSpan).toBeDefined();
+ expect(streamingErrorSpan!.name).toBe('chat error-model');
+ expect(streamingErrorSpan!.status).toBe('error');
+ expect(streamingErrorSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'chat',
+ });
+ expect(streamingErrorSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'error-model',
+ });
+ expect(streamingErrorSpan!.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE]).toEqual({
+ type: 'boolean',
+ value: true,
+ });
+ expect(streamingErrorSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 1,
+ });
+ expect(streamingErrorSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '[{"role":"user","content":"This will fail"}]',
+ });
+ expect(streamingErrorSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(streamingErrorSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.chat',
+ });
+ expect(streamingErrorSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ },
+ })
.start()
.completed();
});
@@ -460,7 +728,60 @@ describe('OpenAI integration (V6)', () => {
test('creates openai related spans with custom options (v6)', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_WITH_OPTIONS })
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(6);
+ const chatCompletionSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'chatcmpl-mock123',
+ );
+ expect(chatCompletionSpan).toBeDefined();
+ expect(chatCompletionSpan!.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE]).toBeUndefined();
+ expect(chatCompletionSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]).toMatchObject({
+ type: 'integer',
+ value: 1,
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toMatchObject({
+ type: 'string',
+ value: expect.any(String),
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE]).toMatchObject({
+ type: 'string',
+ value: expect.any(String),
+ });
+ expect(chatCompletionSpan!.attributes[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]).toMatchObject({
+ type: 'string',
+ value: expect.any(String),
+ });
+
+ const streamingChatCompletionSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_ID_ATTRIBUTE]?.value === 'chatcmpl-stream-123',
+ );
+ expect(streamingChatCompletionSpan).toBeDefined();
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_REQUEST_STREAM_ATTRIBUTE]).toEqual({
+ type: 'boolean',
+ value: true,
+ });
+ expect(
+ streamingChatCompletionSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE],
+ ).toMatchObject({
+ type: 'integer',
+ value: 1,
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toMatchObject({
+ type: 'string',
+ value: expect.any(String),
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE]).toMatchObject({
+ type: 'string',
+ value: expect.any(String),
+ });
+ expect(streamingChatCompletionSpan!.attributes[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]).toMatchObject({
+ type: 'string',
+ value: expect.any(String),
+ });
+ },
+ })
.start()
.completed();
});
@@ -480,7 +801,130 @@ describe('OpenAI integration (V6)', () => {
test('creates openai related spans with sendDefaultPii: false (v6)', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_FALSE_EMBEDDINGS })
+ .expect({
+ transaction: {
+ transaction: 'main',
+ },
+ })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(3);
+ const singleEmbeddingSpan = container.items.find(
+ span =>
+ span.name === 'embeddings text-embedding-3-small' &&
+ span.attributes[GEN_AI_REQUEST_DIMENSIONS_ATTRIBUTE] !== undefined,
+ );
+ expect(singleEmbeddingSpan).toBeDefined();
+ expect(singleEmbeddingSpan!.name).toBe('embeddings text-embedding-3-small');
+ expect(singleEmbeddingSpan!.status).toBe('ok');
+ expect(singleEmbeddingSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'embeddings',
+ });
+ expect(singleEmbeddingSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.embeddings',
+ });
+ expect(singleEmbeddingSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(singleEmbeddingSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(singleEmbeddingSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'text-embedding-3-small',
+ });
+ expect(singleEmbeddingSpan!.attributes[GEN_AI_REQUEST_ENCODING_FORMAT_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'float',
+ });
+ expect(singleEmbeddingSpan!.attributes[GEN_AI_REQUEST_DIMENSIONS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 1536,
+ });
+ expect(singleEmbeddingSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'text-embedding-3-small',
+ });
+ expect(singleEmbeddingSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 10,
+ });
+ expect(singleEmbeddingSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 10,
+ });
+
+ const errorEmbeddingSpan = container.items.find(span => span.name === 'embeddings error-model');
+ expect(errorEmbeddingSpan).toBeDefined();
+ expect(errorEmbeddingSpan!.name).toBe('embeddings error-model');
+ expect(errorEmbeddingSpan!.status).toBe('error');
+ expect(errorEmbeddingSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'embeddings',
+ });
+ expect(errorEmbeddingSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.embeddings',
+ });
+ expect(errorEmbeddingSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(errorEmbeddingSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(errorEmbeddingSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'error-model',
+ });
+
+ const multiEmbeddingSpan = container.items.find(
+ span =>
+ span.name === 'embeddings text-embedding-3-small' &&
+ span.attributes[GEN_AI_REQUEST_DIMENSIONS_ATTRIBUTE] === undefined,
+ );
+ expect(multiEmbeddingSpan).toBeDefined();
+ expect(multiEmbeddingSpan!.name).toBe('embeddings text-embedding-3-small');
+ expect(multiEmbeddingSpan!.status).toBe('ok');
+ expect(multiEmbeddingSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'embeddings',
+ });
+ expect(multiEmbeddingSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.embeddings',
+ });
+ expect(multiEmbeddingSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(multiEmbeddingSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(multiEmbeddingSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'text-embedding-3-small',
+ });
+ expect(multiEmbeddingSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'text-embedding-3-small',
+ });
+ expect(multiEmbeddingSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 10,
+ });
+ expect(multiEmbeddingSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 10,
+ });
+ },
+ })
.start()
.completed();
});
@@ -500,7 +944,142 @@ describe('OpenAI integration (V6)', () => {
test('creates openai related spans with sendDefaultPii: true (v6)', async () => {
await createRunner()
.ignore('event')
- .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_TRUE_EMBEDDINGS })
+ .expect({
+ transaction: {
+ transaction: 'main',
+ },
+ })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(3);
+ const singleEmbeddingSpan = container.items.find(
+ span => span.attributes[GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]?.value === 'Embedding test!',
+ );
+ expect(singleEmbeddingSpan).toBeDefined();
+ expect(singleEmbeddingSpan!.name).toBe('embeddings text-embedding-3-small');
+ expect(singleEmbeddingSpan!.status).toBe('ok');
+ expect(singleEmbeddingSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'embeddings',
+ });
+ expect(singleEmbeddingSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.embeddings',
+ });
+ expect(singleEmbeddingSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(singleEmbeddingSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(singleEmbeddingSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'text-embedding-3-small',
+ });
+ expect(singleEmbeddingSpan!.attributes[GEN_AI_REQUEST_ENCODING_FORMAT_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'float',
+ });
+ expect(singleEmbeddingSpan!.attributes[GEN_AI_REQUEST_DIMENSIONS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 1536,
+ });
+ expect(singleEmbeddingSpan!.attributes[GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'Embedding test!',
+ });
+ expect(singleEmbeddingSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'text-embedding-3-small',
+ });
+ expect(singleEmbeddingSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 10,
+ });
+ expect(singleEmbeddingSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 10,
+ });
+
+ const errorEmbeddingSpan = container.items.find(
+ span => span.attributes[GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]?.value === 'Error embedding test!',
+ );
+ expect(errorEmbeddingSpan).toBeDefined();
+ expect(errorEmbeddingSpan!.name).toBe('embeddings error-model');
+ expect(errorEmbeddingSpan!.status).toBe('error');
+ expect(errorEmbeddingSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'embeddings',
+ });
+ expect(errorEmbeddingSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.embeddings',
+ });
+ expect(errorEmbeddingSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(errorEmbeddingSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(errorEmbeddingSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'error-model',
+ });
+ expect(errorEmbeddingSpan!.attributes[GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'Error embedding test!',
+ });
+
+ const multiEmbeddingSpan = container.items.find(
+ span =>
+ span.attributes[GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]?.value ===
+ '["First input text","Second input text","Third input text"]',
+ );
+ expect(multiEmbeddingSpan).toBeDefined();
+ expect(multiEmbeddingSpan!.name).toBe('embeddings text-embedding-3-small');
+ expect(multiEmbeddingSpan!.status).toBe('ok');
+ expect(multiEmbeddingSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'embeddings',
+ });
+ expect(multiEmbeddingSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_OP]).toEqual({
+ type: 'string',
+ value: 'gen_ai.embeddings',
+ });
+ expect(multiEmbeddingSpan!.attributes[SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]).toEqual({
+ type: 'string',
+ value: 'auto.ai.openai',
+ });
+ expect(multiEmbeddingSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'openai',
+ });
+ expect(multiEmbeddingSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'text-embedding-3-small',
+ });
+ expect(multiEmbeddingSpan!.attributes[GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: '["First input text","Second input text","Third input text"]',
+ });
+ expect(multiEmbeddingSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]).toEqual({
+ type: 'string',
+ value: 'text-embedding-3-small',
+ });
+ expect(multiEmbeddingSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 10,
+ });
+ expect(multiEmbeddingSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]).toEqual({
+ type: 'integer',
+ value: 10,
+ });
+ },
+ })
.start()
.completed();
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/postgres/package.json b/dev-packages/node-integration-tests/suites/tracing/postgres/package.json
index 602eab3a6f3d..040e636bcb69 100644
--- a/dev-packages/node-integration-tests/suites/tracing/postgres/package.json
+++ b/dev-packages/node-integration-tests/suites/tracing/postgres/package.json
@@ -10,7 +10,7 @@
"author": "",
"license": "ISC",
"dependencies": {
- "pg": "8.16.0",
- "pg-native": "3.5.0"
+ "pg": "8.20.0",
+ "pg-native": "3.7.0"
}
}
diff --git a/dev-packages/node-integration-tests/suites/tracing/postgres/test.ts b/dev-packages/node-integration-tests/suites/tracing/postgres/test.ts
index 98c42976498a..e5d0e3f26fa8 100644
--- a/dev-packages/node-integration-tests/suites/tracing/postgres/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/postgres/test.ts
@@ -1,5 +1,6 @@
import { describe, expect, test } from 'vitest';
import { createRunner } from '../../../utils/runner';
+import { conditionalTest } from '../../../utils';
describe('postgres auto instrumentation', () => {
test('should auto-instrument `pg` package', { timeout: 90_000 }, async () => {
@@ -49,7 +50,6 @@ describe('postgres auto instrumentation', () => {
await createRunner(__dirname, 'scenario.js')
.withDockerCompose({
workingDirectory: [__dirname],
- setupCommand: 'yarn',
})
.expect({ transaction: EXPECTED_TRANSACTION })
.start()
@@ -60,7 +60,6 @@ describe('postgres auto instrumentation', () => {
await createRunner(__dirname, 'scenario-ignoreConnect.js')
.withDockerCompose({
workingDirectory: [__dirname],
- setupCommand: 'yarn',
})
.expect({
transaction: txn => {
@@ -103,57 +102,59 @@ describe('postgres auto instrumentation', () => {
.completed();
});
- test('should auto-instrument `pg-native` package', { timeout: 90_000 }, async () => {
- const EXPECTED_TRANSACTION = {
- transaction: 'Test Transaction',
- spans: expect.arrayContaining([
- expect.objectContaining({
- data: expect.objectContaining({
- 'db.system': 'postgresql',
- 'db.name': 'tests',
- 'sentry.origin': 'manual',
- 'sentry.op': 'db',
+ conditionalTest({ max: 25 })('pg-native', () => {
+ test('should auto-instrument `pg-native` package', { timeout: 90_000 }, async () => {
+ const EXPECTED_TRANSACTION = {
+ transaction: 'Test Transaction',
+ spans: expect.arrayContaining([
+ expect.objectContaining({
+ data: expect.objectContaining({
+ 'db.system': 'postgresql',
+ 'db.name': 'tests',
+ 'sentry.origin': 'manual',
+ 'sentry.op': 'db',
+ }),
+ description: 'pg.connect',
+ op: 'db',
+ status: 'ok',
}),
- description: 'pg.connect',
- op: 'db',
- status: 'ok',
- }),
- expect.objectContaining({
- data: expect.objectContaining({
- 'db.system': 'postgresql',
- 'db.name': 'tests',
- 'db.statement': 'INSERT INTO "NativeUser" ("email", "name") VALUES ($1, $2)',
- 'sentry.origin': 'auto.db.otel.postgres',
- 'sentry.op': 'db',
+ expect.objectContaining({
+ data: expect.objectContaining({
+ 'db.system': 'postgresql',
+ 'db.name': 'tests',
+ 'db.statement': 'INSERT INTO "NativeUser" ("email", "name") VALUES ($1, $2)',
+ 'sentry.origin': 'auto.db.otel.postgres',
+ 'sentry.op': 'db',
+ }),
+ description: 'INSERT INTO "NativeUser" ("email", "name") VALUES ($1, $2)',
+ op: 'db',
+ status: 'ok',
+ origin: 'auto.db.otel.postgres',
}),
- description: 'INSERT INTO "NativeUser" ("email", "name") VALUES ($1, $2)',
- op: 'db',
- status: 'ok',
- origin: 'auto.db.otel.postgres',
- }),
- expect.objectContaining({
- data: expect.objectContaining({
- 'db.system': 'postgresql',
- 'db.name': 'tests',
- 'db.statement': 'SELECT * FROM "NativeUser"',
- 'sentry.origin': 'auto.db.otel.postgres',
- 'sentry.op': 'db',
+ expect.objectContaining({
+ data: expect.objectContaining({
+ 'db.system': 'postgresql',
+ 'db.name': 'tests',
+ 'db.statement': 'SELECT * FROM "NativeUser"',
+ 'sentry.origin': 'auto.db.otel.postgres',
+ 'sentry.op': 'db',
+ }),
+ description: 'SELECT * FROM "NativeUser"',
+ op: 'db',
+ status: 'ok',
+ origin: 'auto.db.otel.postgres',
}),
- description: 'SELECT * FROM "NativeUser"',
- op: 'db',
- status: 'ok',
- origin: 'auto.db.otel.postgres',
- }),
- ]),
- };
+ ]),
+ };
- await createRunner(__dirname, 'scenario-native.js')
- .withDockerCompose({
- workingDirectory: [__dirname],
- setupCommand: 'yarn',
- })
- .expect({ transaction: EXPECTED_TRANSACTION })
- .start()
- .completed();
+ await createRunner(__dirname, 'scenario-native.js')
+ .withDockerCompose({
+ workingDirectory: [__dirname],
+ setupCommand: 'yarn',
+ })
+ .expect({ transaction: EXPECTED_TRANSACTION })
+ .start()
+ .completed();
+ });
});
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/postgres/yarn.lock b/dev-packages/node-integration-tests/suites/tracing/postgres/yarn.lock
index 9eb59e69f6c2..8b2e296ece0b 100644
--- a/dev-packages/node-integration-tests/suites/tracing/postgres/yarn.lock
+++ b/dev-packages/node-integration-tests/suites/tracing/postgres/yarn.lock
@@ -27,38 +27,38 @@ nan@~2.22.2:
resolved "https://registry.yarnpkg.com/nan/-/nan-2.22.2.tgz#6b504fd029fb8f38c0990e52ad5c26772fdacfbb"
integrity sha512-DANghxFkS1plDdRsX0X9pm0Z6SJNN6gBdtXfanwoZ8hooC5gosGFSBGRYHUVPz1asKA/kMRqDRdHrluZ61SpBQ==
-pg-cloudflare@^1.2.5:
- version "1.2.5"
- resolved "https://registry.yarnpkg.com/pg-cloudflare/-/pg-cloudflare-1.2.5.tgz#2e3649c38a7a9c74a7e5327c8098a2fd9af595bd"
- integrity sha512-OOX22Vt0vOSRrdoUPKJ8Wi2OpE/o/h9T8X1s4qSkCedbNah9ei2W2765be8iMVxQUsvgT7zIAT2eIa9fs5+vtg==
+pg-cloudflare@^1.3.0:
+ version "1.3.0"
+ resolved "https://registry.yarnpkg.com/pg-cloudflare/-/pg-cloudflare-1.3.0.tgz#386035d4bfcf1a7045b026f8b21acf5353f14d65"
+ integrity sha512-6lswVVSztmHiRtD6I8hw4qP/nDm1EJbKMRhf3HCYaqud7frGysPv7FYJ5noZQdhQtN2xJnimfMtvQq21pdbzyQ==
-pg-connection-string@^2.9.0:
- version "2.9.0"
- resolved "https://registry.yarnpkg.com/pg-connection-string/-/pg-connection-string-2.9.0.tgz#f75e06591fdd42ec7636fe2c6a03febeedbec9bf"
- integrity sha512-P2DEBKuvh5RClafLngkAuGe9OUlFV7ebu8w1kmaaOgPcpJd1RIFh7otETfI6hAR8YupOLFTY7nuvvIn7PLciUQ==
+pg-connection-string@^2.12.0:
+ version "2.12.0"
+ resolved "https://registry.yarnpkg.com/pg-connection-string/-/pg-connection-string-2.12.0.tgz#4084f917902bb2daae3dc1376fe24ac7b4eaccf2"
+ integrity sha512-U7qg+bpswf3Cs5xLzRqbXbQl85ng0mfSV/J0nnA31MCLgvEaAo7CIhmeyrmJpOr7o+zm0rXK+hNnT5l9RHkCkQ==
pg-int8@1.0.1:
version "1.0.1"
resolved "https://registry.yarnpkg.com/pg-int8/-/pg-int8-1.0.1.tgz#943bd463bf5b71b4170115f80f8efc9a0c0eb78c"
integrity sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw==
-pg-native@3.5.0:
- version "3.5.0"
- resolved "https://registry.yarnpkg.com/pg-native/-/pg-native-3.5.0.tgz#1a43c0d5f5744e40df3bf737c43178ce98984255"
- integrity sha512-rj4LYouevTdKxvRLnvtOLEPOerkiPAqUdZE1K48IfQluEH/x7GrldEDdSaEOmJ6z7s6LQwDTpAPhm2s00iG8xw==
+pg-native@3.7.0:
+ version "3.7.0"
+ resolved "https://registry.yarnpkg.com/pg-native/-/pg-native-3.7.0.tgz#1bd78031482c78dc5240c4350cddb291493dc34f"
+ integrity sha512-q2V5DynvPt4PD75q1DqZOUrieEgE4bf/flEeLCzzs8axgn8x2mRCUhd1DP0cqMz8FEdpVEDb0/zKblQWeijbGg==
dependencies:
libpq "^1.8.15"
pg-types "2.2.0"
-pg-pool@^3.10.0:
- version "3.10.0"
- resolved "https://registry.yarnpkg.com/pg-pool/-/pg-pool-3.10.0.tgz#134b0213755c5e7135152976488aa7cd7ee1268d"
- integrity sha512-DzZ26On4sQ0KmqnO34muPcmKbhrjmyiO4lCCR0VwEd7MjmiKf5NTg/6+apUEu0NF7ESa37CGzFxH513CoUmWnA==
+pg-pool@^3.13.0:
+ version "3.13.0"
+ resolved "https://registry.yarnpkg.com/pg-pool/-/pg-pool-3.13.0.tgz#416482e9700e8f80c685a6ae5681697a413c13a3"
+ integrity sha512-gB+R+Xud1gLFuRD/QgOIgGOBE2KCQPaPwkzBBGC9oG69pHTkhQeIuejVIk3/cnDyX39av2AxomQiyPT13WKHQA==
-pg-protocol@^1.10.0:
- version "1.10.0"
- resolved "https://registry.yarnpkg.com/pg-protocol/-/pg-protocol-1.10.0.tgz#a473afcbb1c6e5dc3ac24869ba3dd563f8a1ae1b"
- integrity sha512-IpdytjudNuLv8nhlHs/UrVBhU0e78J0oIS/0AVdTbWxSOkFUVdsHC/NrorO6nXsQNDTT1kzDSOMJubBQviX18Q==
+pg-protocol@^1.13.0:
+ version "1.13.0"
+ resolved "https://registry.yarnpkg.com/pg-protocol/-/pg-protocol-1.13.0.tgz#fdaf6d020bca590d58bb991b4b16fc448efe0511"
+ integrity sha512-zzdvXfS6v89r6v7OcFCHfHlyG/wvry1ALxZo4LqgUoy7W9xhBDMaqOuMiF3qEV45VqsN6rdlcehHrfDtlCPc8w==
pg-types@2.2.0:
version "2.2.0"
@@ -71,18 +71,18 @@ pg-types@2.2.0:
postgres-date "~1.0.4"
postgres-interval "^1.1.0"
-pg@8.16.0:
- version "8.16.0"
- resolved "https://registry.yarnpkg.com/pg/-/pg-8.16.0.tgz#40b08eedb5eb1834252cf3e3629503e32e6c6c04"
- integrity sha512-7SKfdvP8CTNXjMUzfcVTaI+TDzBEeaUnVwiVGZQD1Hh33Kpev7liQba9uLd4CfN8r9mCVsD0JIpq03+Unpz+kg==
+pg@8.20.0:
+ version "8.20.0"
+ resolved "https://registry.yarnpkg.com/pg/-/pg-8.20.0.tgz#1a274de944cb329fd6dd77a6d371a005ba6b136d"
+ integrity sha512-ldhMxz2r8fl/6QkXnBD3CR9/xg694oT6DZQ2s6c/RI28OjtSOpxnPrUCGOBJ46RCUxcWdx3p6kw/xnDHjKvaRA==
dependencies:
- pg-connection-string "^2.9.0"
- pg-pool "^3.10.0"
- pg-protocol "^1.10.0"
+ pg-connection-string "^2.12.0"
+ pg-pool "^3.13.0"
+ pg-protocol "^1.13.0"
pg-types "2.2.0"
pgpass "1.0.5"
optionalDependencies:
- pg-cloudflare "^1.2.5"
+ pg-cloudflare "^1.3.0"
pgpass@1.0.5:
version "1.0.5"
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/instrument-no-truncation.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/instrument-no-truncation.mjs
index 0593d975c8d7..59638403d00f 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/instrument-no-truncation.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/instrument-no-truncation.mjs
@@ -14,4 +14,5 @@ Sentry.init({
enableTruncation: false,
}),
],
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/instrument-with-pii.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/instrument-with-pii.mjs
index b798e21228f5..ddc247cc2d41 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/instrument-with-pii.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/instrument-with-pii.mjs
@@ -8,4 +8,5 @@ Sentry.init({
sendDefaultPii: true,
transport: loggingTransport,
integrations: [Sentry.vercelAIIntegration()],
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/instrument.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/instrument.mjs
index 5e898ee1949d..a76d206a0b61 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/instrument.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/instrument.mjs
@@ -7,4 +7,5 @@ Sentry.init({
tracesSampleRate: 1.0,
transport: loggingTransport,
integrations: [Sentry.vercelAIIntegration()],
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/test-generate-object.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/test-generate-object.ts
index 39e13d5425c2..54c64bc2172b 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/test-generate-object.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/test-generate-object.ts
@@ -6,62 +6,45 @@ describe('Vercel AI integration - generateObject', () => {
cleanupChildProcesses();
});
- const EXPECTED_TRANSACTION = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // generateObject span
- expect.objectContaining({
- data: expect.objectContaining({
- 'vercel.ai.model.id': 'mock-model-id',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateObject',
- 'vercel.ai.pipeline.name': 'generateObject',
- 'vercel.ai.streaming': false,
- 'vercel.ai.settings.mode': 'json',
- 'vercel.ai.settings.output': 'object',
- 'gen_ai.request.schema': expect.any(String),
- 'gen_ai.response.model': 'mock-model-id',
- 'gen_ai.usage.input_tokens': 15,
- 'gen_ai.usage.output_tokens': 25,
- 'gen_ai.usage.total_tokens': 40,
- 'gen_ai.operation.name': 'invoke_agent',
- 'sentry.op': 'gen_ai.invoke_agent',
- 'sentry.origin': 'auto.vercelai.otel',
- }),
- description: 'invoke_agent',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // generateObject.doGenerate span
- expect.objectContaining({
- data: expect.objectContaining({
- 'sentry.origin': 'auto.vercelai.otel',
- 'sentry.op': 'gen_ai.generate_content',
- 'gen_ai.operation.name': 'generate_content',
- 'vercel.ai.operationId': 'ai.generateObject.doGenerate',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.model.id': 'mock-model-id',
- 'vercel.ai.pipeline.name': 'generateObject.doGenerate',
- 'vercel.ai.streaming': false,
- 'gen_ai.system': 'mock-provider',
- 'gen_ai.request.model': 'mock-model-id',
- 'gen_ai.response.model': 'mock-model-id',
- 'gen_ai.usage.input_tokens': 15,
- 'gen_ai.usage.output_tokens': 25,
- 'gen_ai.usage.total_tokens': 40,
- }),
- description: 'generate_content mock-model-id',
- op: 'gen_ai.generate_content',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- ]),
- };
-
createEsmAndCjsTests(__dirname, 'scenario-generate-object.mjs', 'instrument.mjs', (createRunner, test) => {
test('captures generateObject spans with schema attributes', async () => {
- await createRunner().expect({ transaction: EXPECTED_TRANSACTION }).start().completed();
+ await createRunner()
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(2);
+ const [firstSpan, secondSpan] = container.items;
+
+ // [0] generateObject (invoke_agent)
+ expect(firstSpan!.name).toBe('invoke_agent');
+ expect(firstSpan!.status).toBe('ok');
+ expect(firstSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+ expect(firstSpan!.attributes['vercel.ai.operationId'].value).toBe('ai.generateObject');
+ expect(firstSpan!.attributes['sentry.origin'].value).toBe('auto.vercelai.otel');
+ expect(firstSpan!.attributes['gen_ai.operation.name'].value).toBe('invoke_agent');
+ expect(firstSpan!.attributes['gen_ai.response.model'].value).toBe('mock-model-id');
+ expect(firstSpan!.attributes['gen_ai.usage.input_tokens'].value).toBe(15);
+ expect(firstSpan!.attributes['gen_ai.usage.output_tokens'].value).toBe(25);
+ expect(firstSpan!.attributes['gen_ai.usage.total_tokens'].value).toBe(40);
+ expect(firstSpan!.attributes['gen_ai.request.schema']).toBeDefined();
+
+ // [1] generateObject.doGenerate (generate_content)
+ expect(secondSpan!.name).toBe('generate_content mock-model-id');
+ expect(secondSpan!.status).toBe('ok');
+ expect(secondSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
+ expect(secondSpan!.attributes['vercel.ai.operationId'].value).toBe('ai.generateObject.doGenerate');
+ expect(secondSpan!.attributes['sentry.origin'].value).toBe('auto.vercelai.otel');
+ expect(secondSpan!.attributes['gen_ai.operation.name'].value).toBe('generate_content');
+ expect(secondSpan!.attributes['gen_ai.system'].value).toBe('mock-provider');
+ expect(secondSpan!.attributes['gen_ai.request.model'].value).toBe('mock-model-id');
+ expect(secondSpan!.attributes['gen_ai.response.model'].value).toBe('mock-model-id');
+ expect(secondSpan!.attributes['gen_ai.usage.input_tokens'].value).toBe(15);
+ expect(secondSpan!.attributes['gen_ai.usage.output_tokens'].value).toBe(25);
+ expect(secondSpan!.attributes['gen_ai.usage.total_tokens'].value).toBe(40);
+ },
+ })
+ .start()
+ .completed();
});
});
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
index d75a1faf8ea0..3001a07765b2 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
@@ -1,4 +1,3 @@
-import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
import type { Event } from '@sentry/node';
import { afterAll, describe, expect } from 'vitest';
import {
@@ -9,8 +8,6 @@ import {
GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE,
GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE,
GEN_AI_REQUEST_MODEL_ATTRIBUTE,
- GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE,
- GEN_AI_RESPONSE_ID_ATTRIBUTE,
GEN_AI_RESPONSE_MODEL_ATTRIBUTE,
GEN_AI_SYSTEM_ATTRIBUTE,
GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE,
@@ -31,576 +28,284 @@ describe('Vercel AI integration', () => {
cleanupChildProcesses();
});
- const EXPECTED_TRANSACTION_DEFAULT_PII_FALSE = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // First span - no telemetry config, should enable telemetry but not record inputs/outputs when sendDefaultPii: false
- expect.objectContaining({
- data: {
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText',
- 'vercel.ai.pipeline.name': 'generateText',
- 'vercel.ai.response.finishReason': 'stop',
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.settings.maxSteps': 1,
- 'vercel.ai.streaming': false,
- },
- description: 'invoke_agent',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // Second span - explicitly enabled telemetry but recordInputs/recordOutputs not set, should not record when sendDefaultPii: false
- expect.objectContaining({
- data: {
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'],
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText.doGenerate',
- 'vercel.ai.pipeline.name': 'generateText.doGenerate',
- 'vercel.ai.response.finishReason': 'stop',
- 'vercel.ai.response.id': expect.any(String),
- 'vercel.ai.response.model': 'mock-model-id',
- 'vercel.ai.response.timestamp': expect.any(String),
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.streaming': false,
- },
- description: 'generate_content mock-model-id',
- op: 'gen_ai.generate_content',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // Third span - explicit telemetry enabled, should record inputs/outputs regardless of sendDefaultPii
- expect.objectContaining({
- data: {
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]',
- [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
- '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText',
- 'vercel.ai.pipeline.name': 'generateText',
- 'vercel.ai.prompt': '[{"role":"user","content":"Where is the second span?"}]',
- 'vercel.ai.response.finishReason': 'stop',
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.settings.maxSteps': 1,
- 'vercel.ai.streaming': false,
- },
- description: 'invoke_agent',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // Fourth span - doGenerate for explicit telemetry enabled call
- expect.objectContaining({
- data: {
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String),
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
- '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'],
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText.doGenerate',
- 'vercel.ai.pipeline.name': 'generateText.doGenerate',
- 'vercel.ai.prompt.format': expect.any(String),
- 'vercel.ai.response.finishReason': 'stop',
- 'vercel.ai.response.id': expect.any(String),
- 'vercel.ai.response.model': 'mock-model-id',
- 'vercel.ai.response.timestamp': expect.any(String),
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.streaming': false,
- },
- description: 'generate_content mock-model-id',
- op: 'gen_ai.generate_content',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // Fifth span - tool call generateText span
- expect.objectContaining({
- data: {
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText',
- 'vercel.ai.pipeline.name': 'generateText',
- 'vercel.ai.response.finishReason': 'tool-calls',
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.settings.maxSteps': 1,
- 'vercel.ai.streaming': false,
- },
- description: 'invoke_agent',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // Sixth span - tool call doGenerate span
- expect.objectContaining({
- data: {
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'],
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText.doGenerate',
- 'vercel.ai.pipeline.name': 'generateText.doGenerate',
- 'vercel.ai.response.finishReason': 'tool-calls',
- 'vercel.ai.response.id': expect.any(String),
- 'vercel.ai.response.model': 'mock-model-id',
- 'vercel.ai.response.timestamp': expect.any(String),
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.streaming': false,
- },
- description: 'generate_content mock-model-id',
- op: 'gen_ai.generate_content',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // Seventh span - tool call execution span
- // Note: gen_ai.tool.description is NOT present when sendDefaultPii: false because ai.prompt.tools is not recorded
- expect.objectContaining({
- data: {
- [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1',
- [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather',
- [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function',
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- 'vercel.ai.operationId': 'ai.toolCall',
- },
- description: 'execute_tool getWeather',
- op: 'gen_ai.execute_tool',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- ]),
- };
-
- const EXPECTED_AVAILABLE_TOOLS_JSON =
- '[{"type":"function","name":"getWeather","description":"Get the current weather for a location","parameters":{"type":"object","properties":{"location":{"type":"string"}},"required":["location"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}}]';
-
- const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // First span - no telemetry config, should enable telemetry AND record inputs/outputs when sendDefaultPii: true
- expect.objectContaining({
- data: {
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the first span?"}]',
- [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
- '[{"role":"assistant","parts":[{"type":"text","content":"First span here!"}],"finish_reason":"stop"}]',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText',
- 'vercel.ai.pipeline.name': 'generateText',
- 'vercel.ai.prompt': '[{"role":"user","content":"Where is the first span?"}]',
- 'vercel.ai.response.finishReason': 'stop',
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.settings.maxSteps': 1,
- 'vercel.ai.streaming': false,
- },
- description: 'invoke_agent',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- parent_span_id: expect.any(String),
- span_id: expect.any(String),
- start_timestamp: expect.any(Number),
- timestamp: expect.any(Number),
- trace_id: expect.any(String),
- }),
- // Second span - doGenerate for first call, should also include input/output fields when sendDefaultPii: true
- expect.objectContaining({
- data: {
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]:
- '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]',
- [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
- '[{"role":"assistant","parts":[{"type":"text","content":"First span here!"}],"finish_reason":"stop"}]',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'],
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText.doGenerate',
- 'vercel.ai.pipeline.name': 'generateText.doGenerate',
- 'vercel.ai.prompt.format': 'prompt',
- 'vercel.ai.response.finishReason': 'stop',
- 'vercel.ai.response.id': expect.any(String),
- 'vercel.ai.response.model': 'mock-model-id',
- 'vercel.ai.response.timestamp': expect.any(String),
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.streaming': false,
- },
- description: 'generate_content mock-model-id',
- op: 'gen_ai.generate_content',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- parent_span_id: expect.any(String),
- span_id: expect.any(String),
- start_timestamp: expect.any(Number),
- timestamp: expect.any(Number),
- trace_id: expect.any(String),
- }),
- // Third span - explicitly enabled telemetry, should record inputs/outputs regardless of sendDefaultPii
- expect.objectContaining({
- data: {
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]',
- [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
- '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText',
- 'vercel.ai.pipeline.name': 'generateText',
- 'vercel.ai.prompt': '[{"role":"user","content":"Where is the second span?"}]',
- 'vercel.ai.response.finishReason': 'stop',
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.settings.maxSteps': 1,
- 'vercel.ai.streaming': false,
- },
- description: 'invoke_agent',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- parent_span_id: expect.any(String),
- span_id: expect.any(String),
- start_timestamp: expect.any(Number),
- timestamp: expect.any(Number),
- trace_id: expect.any(String),
- }),
- // Fourth span - doGenerate for explicitly enabled telemetry call
- expect.objectContaining({
- data: {
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String),
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
- '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'],
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText.doGenerate',
- 'vercel.ai.pipeline.name': 'generateText.doGenerate',
- 'vercel.ai.prompt.format': expect.any(String),
- 'vercel.ai.response.finishReason': 'stop',
- 'vercel.ai.response.id': expect.any(String),
- 'vercel.ai.response.model': 'mock-model-id',
- 'vercel.ai.response.timestamp': expect.any(String),
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.streaming': false,
- },
- description: 'generate_content mock-model-id',
- op: 'gen_ai.generate_content',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- parent_span_id: expect.any(String),
- span_id: expect.any(String),
- start_timestamp: expect.any(Number),
- timestamp: expect.any(Number),
- trace_id: expect.any(String),
- }),
- // Fifth span - tool call generateText span (should include prompts when sendDefaultPii: true)
- expect.objectContaining({
- data: {
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in San Francisco?"}]',
- [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
- '[{"role":"assistant","parts":[{"type":"text","content":"Tool call completed!"},{"type":"tool_call","id":"call-1","name":"getWeather","arguments":"{ \\"location\\": \\"San Francisco\\" }"}],"finish_reason":"tool_call"}]',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText',
- 'vercel.ai.pipeline.name': 'generateText',
- 'vercel.ai.prompt': '[{"role":"user","content":"What is the weather in San Francisco?"}]',
- 'vercel.ai.response.finishReason': 'tool-calls',
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.settings.maxSteps': 1,
- 'vercel.ai.streaming': false,
- },
- description: 'invoke_agent',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- parent_span_id: expect.any(String),
- span_id: expect.any(String),
- start_timestamp: expect.any(Number),
- timestamp: expect.any(Number),
- trace_id: expect.any(String),
- }),
- // Sixth span - tool call doGenerate span (should include prompts when sendDefaultPii: true)
- expect.objectContaining({
- data: {
- [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: EXPECTED_AVAILABLE_TOOLS_JSON,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String),
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
- '[{"role":"assistant","parts":[{"type":"text","content":"Tool call completed!"},{"type":"tool_call","id":"call-1","name":"getWeather","arguments":"{ \\"location\\": \\"San Francisco\\" }"}],"finish_reason":"tool_call"}]',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'],
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText.doGenerate',
- 'vercel.ai.pipeline.name': 'generateText.doGenerate',
- 'vercel.ai.prompt.format': expect.any(String),
- 'vercel.ai.prompt.toolChoice': expect.any(String),
- 'vercel.ai.response.finishReason': 'tool-calls',
- 'vercel.ai.response.id': expect.any(String),
- 'vercel.ai.response.model': 'mock-model-id',
- 'vercel.ai.response.timestamp': expect.any(String),
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.streaming': false,
- },
- description: 'generate_content mock-model-id',
- op: 'gen_ai.generate_content',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- parent_span_id: expect.any(String),
- span_id: expect.any(String),
- start_timestamp: expect.any(Number),
- timestamp: expect.any(Number),
- trace_id: expect.any(String),
- }),
- // Seventh span - tool call execution span
- expect.objectContaining({
- data: {
- [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1',
- [GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE]: 'Get the current weather for a location',
- [GEN_AI_TOOL_INPUT_ATTRIBUTE]: expect.any(String),
- [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather',
- [GEN_AI_TOOL_OUTPUT_ATTRIBUTE]: expect.any(String),
- [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function',
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- 'vercel.ai.operationId': 'ai.toolCall',
- },
- description: 'execute_tool getWeather',
- op: 'gen_ai.execute_tool',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- parent_span_id: expect.any(String),
- span_id: expect.any(String),
- start_timestamp: expect.any(Number),
- timestamp: expect.any(Number),
- trace_id: expect.any(String),
- }),
- ]),
- };
-
createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument.mjs', (createRunner, test) => {
test('creates ai related spans with sendDefaultPii: false', async () => {
- await createRunner().expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_FALSE }).start().completed();
+ await createRunner()
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(7);
+ const firstInvokeAgentSpan = container.items.find(
+ span =>
+ span.name === 'invoke_agent' &&
+ span.attributes['vercel.ai.operationId'].value === 'ai.generateText' &&
+ span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE] === undefined &&
+ span.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value === 10,
+ );
+ expect(firstInvokeAgentSpan).toBeDefined();
+ expect(firstInvokeAgentSpan!.name).toBe('invoke_agent');
+ expect(firstInvokeAgentSpan!.status).toBe('ok');
+ expect(firstInvokeAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+ expect(firstInvokeAgentSpan!.attributes['vercel.ai.operationId'].value).toBe('ai.generateText');
+ expect(firstInvokeAgentSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('mock-model-id');
+ expect(firstInvokeAgentSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE].value).toBe('mock-model-id');
+ expect(firstInvokeAgentSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(10);
+ expect(firstInvokeAgentSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(20);
+ expect(firstInvokeAgentSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE].value).toBe(30);
+ expect(firstInvokeAgentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toBeUndefined();
+
+ const firstGenerateContentSpan = container.items.find(
+ span =>
+ span.name === 'generate_content mock-model-id' &&
+ span.attributes['vercel.ai.operationId'].value === 'ai.generateText.doGenerate' &&
+ span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE] === undefined,
+ );
+ expect(firstGenerateContentSpan).toBeDefined();
+ expect(firstGenerateContentSpan!.name).toBe('generate_content mock-model-id');
+ expect(firstGenerateContentSpan!.status).toBe('ok');
+ expect(firstGenerateContentSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
+ expect(firstGenerateContentSpan!.attributes['vercel.ai.operationId'].value).toBe(
+ 'ai.generateText.doGenerate',
+ );
+ expect(firstGenerateContentSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('mock-provider');
+ expect(firstGenerateContentSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(10);
+ expect(firstGenerateContentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toBeUndefined();
+
+ const secondInvokeAgentSpan = container.items.find(
+ span =>
+ span.name === 'invoke_agent' &&
+ span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]?.value ===
+ '[{"role":"user","content":"Where is the second span?"}]',
+ );
+ expect(secondInvokeAgentSpan).toBeDefined();
+ expect(secondInvokeAgentSpan!.name).toBe('invoke_agent');
+ expect(secondInvokeAgentSpan!.status).toBe('ok');
+ expect(secondInvokeAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+ expect(secondInvokeAgentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE].value).toBe(
+ '[{"role":"user","content":"Where is the second span?"}]',
+ );
+ expect(secondInvokeAgentSpan!.attributes[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE].value).toBe(
+ '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]',
+ );
+
+ const secondGenerateContentSpan = container.items.find(
+ span =>
+ span.name === 'generate_content mock-model-id' &&
+ span.attributes[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]?.value?.includes('Second span here!'),
+ );
+ expect(secondGenerateContentSpan).toBeDefined();
+ expect(secondGenerateContentSpan!.name).toBe('generate_content mock-model-id');
+ expect(secondGenerateContentSpan!.status).toBe('ok');
+ expect(secondGenerateContentSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
+ expect(secondGenerateContentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toBeDefined();
+ expect(secondGenerateContentSpan!.attributes[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE].value).toContain(
+ 'Second span here!',
+ );
+
+ const toolInvokeAgentSpan = container.items.find(
+ span =>
+ span.name === 'invoke_agent' && span.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]?.value === 15,
+ );
+ expect(toolInvokeAgentSpan).toBeDefined();
+ expect(toolInvokeAgentSpan!.name).toBe('invoke_agent');
+ expect(toolInvokeAgentSpan!.status).toBe('ok');
+ expect(toolInvokeAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+ expect(toolInvokeAgentSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(15);
+ expect(toolInvokeAgentSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(25);
+ expect(toolInvokeAgentSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE].value).toBe(40);
+
+ const toolGenerateContentSpan = container.items.find(
+ span =>
+ span.name === 'generate_content mock-model-id' &&
+ span.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]?.value === 15,
+ );
+ expect(toolGenerateContentSpan).toBeDefined();
+ expect(toolGenerateContentSpan!.name).toBe('generate_content mock-model-id');
+ expect(toolGenerateContentSpan!.status).toBe('ok');
+ expect(toolGenerateContentSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
+ expect(toolGenerateContentSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(15);
+
+ const toolExecutionSpan = container.items.find(span => span.name === 'execute_tool getWeather');
+ expect(toolExecutionSpan).toBeDefined();
+ expect(toolExecutionSpan!.name).toBe('execute_tool getWeather');
+ expect(toolExecutionSpan!.status).toBe('ok');
+ expect(toolExecutionSpan!.attributes['sentry.op'].value).toBe('gen_ai.execute_tool');
+ expect(toolExecutionSpan!.attributes[GEN_AI_TOOL_NAME_ATTRIBUTE].value).toBe('getWeather');
+ expect(toolExecutionSpan!.attributes[GEN_AI_TOOL_CALL_ID_ATTRIBUTE].value).toBe('call-1');
+ expect(toolExecutionSpan!.attributes[GEN_AI_TOOL_TYPE_ATTRIBUTE].value).toBe('function');
+ },
+ })
+ .start()
+ .completed();
});
});
createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument-with-pii.mjs', (createRunner, test) => {
test('creates ai related spans with sendDefaultPii: true', async () => {
- await createRunner().expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_TRUE }).start().completed();
+ await createRunner()
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(7);
+ const firstInvokeAgentSpan = container.items.find(
+ span =>
+ span.name === 'invoke_agent' &&
+ span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]?.value ===
+ '[{"role":"user","content":"Where is the first span?"}]',
+ );
+ expect(firstInvokeAgentSpan).toBeDefined();
+ expect(firstInvokeAgentSpan!.name).toBe('invoke_agent');
+ expect(firstInvokeAgentSpan!.status).toBe('ok');
+ expect(firstInvokeAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+ expect(firstInvokeAgentSpan!.attributes['vercel.ai.operationId'].value).toBe('ai.generateText');
+ expect(firstInvokeAgentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE].value).toBe(
+ '[{"role":"user","content":"Where is the first span?"}]',
+ );
+ expect(firstInvokeAgentSpan!.attributes[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE].value).toBe(
+ '[{"role":"assistant","parts":[{"type":"text","content":"First span here!"}],"finish_reason":"stop"}]',
+ );
+
+ const firstGenerateContentSpan = container.items.find(
+ span =>
+ span.name === 'generate_content mock-model-id' &&
+ span.attributes[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]?.value?.includes('First span here!'),
+ );
+ expect(firstGenerateContentSpan).toBeDefined();
+ expect(firstGenerateContentSpan!.name).toBe('generate_content mock-model-id');
+ expect(firstGenerateContentSpan!.status).toBe('ok');
+ expect(firstGenerateContentSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
+ expect(firstGenerateContentSpan!.attributes['vercel.ai.operationId'].value).toBe(
+ 'ai.generateText.doGenerate',
+ );
+ expect(firstGenerateContentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toBeDefined();
+ expect(firstGenerateContentSpan!.attributes[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE].value).toContain(
+ 'First span here!',
+ );
+
+ const secondInvokeAgentSpan = container.items.find(
+ span =>
+ span.name === 'invoke_agent' &&
+ span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]?.value ===
+ '[{"role":"user","content":"Where is the second span?"}]',
+ );
+ expect(secondInvokeAgentSpan).toBeDefined();
+ expect(secondInvokeAgentSpan!.name).toBe('invoke_agent');
+ expect(secondInvokeAgentSpan!.status).toBe('ok');
+ expect(secondInvokeAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+ expect(secondInvokeAgentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE].value).toBe(
+ '[{"role":"user","content":"Where is the second span?"}]',
+ );
+
+ const secondGenerateContentSpan = container.items.find(
+ span =>
+ span.name === 'generate_content mock-model-id' &&
+ span.attributes[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]?.value?.includes('Second span here!'),
+ );
+ expect(secondGenerateContentSpan).toBeDefined();
+ expect(secondGenerateContentSpan!.name).toBe('generate_content mock-model-id');
+ expect(secondGenerateContentSpan!.status).toBe('ok');
+ expect(secondGenerateContentSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
+
+ const toolInvokeAgentSpan = container.items.find(
+ span =>
+ span.name === 'invoke_agent' &&
+ span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]?.value ===
+ '[{"role":"user","content":"What is the weather in San Francisco?"}]',
+ );
+ expect(toolInvokeAgentSpan).toBeDefined();
+ expect(toolInvokeAgentSpan!.name).toBe('invoke_agent');
+ expect(toolInvokeAgentSpan!.status).toBe('ok');
+ expect(toolInvokeAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+ expect(toolInvokeAgentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE].value).toBe(
+ '[{"role":"user","content":"What is the weather in San Francisco?"}]',
+ );
+ expect(toolInvokeAgentSpan!.attributes[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]).toBeDefined();
+
+ const toolGenerateContentSpan = container.items.find(
+ span =>
+ span.name === 'generate_content mock-model-id' &&
+ span.attributes[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]?.value?.includes('getWeather'),
+ );
+ expect(toolGenerateContentSpan).toBeDefined();
+ expect(toolGenerateContentSpan!.name).toBe('generate_content mock-model-id');
+ expect(toolGenerateContentSpan!.status).toBe('ok');
+ expect(toolGenerateContentSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
+ expect(toolGenerateContentSpan!.attributes[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE].value).toContain(
+ 'getWeather',
+ );
+ expect(toolGenerateContentSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(15);
+
+ const toolExecutionSpan = container.items.find(span => span.name === 'execute_tool getWeather');
+ expect(toolExecutionSpan).toBeDefined();
+ expect(toolExecutionSpan!.name).toBe('execute_tool getWeather');
+ expect(toolExecutionSpan!.status).toBe('ok');
+ expect(toolExecutionSpan!.attributes['sentry.op'].value).toBe('gen_ai.execute_tool');
+ expect(toolExecutionSpan!.attributes[GEN_AI_TOOL_NAME_ATTRIBUTE].value).toBe('getWeather');
+ expect(toolExecutionSpan!.attributes[GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE].value).toBe(
+ 'Get the current weather for a location',
+ );
+ expect(toolExecutionSpan!.attributes[GEN_AI_TOOL_INPUT_ATTRIBUTE]).toBeDefined();
+ expect(toolExecutionSpan!.attributes[GEN_AI_TOOL_OUTPUT_ATTRIBUTE]).toBeDefined();
+ },
+ })
+ .start()
+ .completed();
});
});
createEsmAndCjsTests(__dirname, 'scenario-error-in-tool.mjs', 'instrument.mjs', (createRunner, test) => {
test('captures error in tool', async () => {
- const expectedTransaction = {
- transaction: 'main',
- spans: expect.arrayContaining([
- expect.objectContaining({
- data: {
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText',
- 'vercel.ai.pipeline.name': 'generateText',
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.settings.maxSteps': 1,
- 'vercel.ai.streaming': false,
- },
- description: 'invoke_agent',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.vercelai.otel',
- status: 'internal_error',
- }),
- expect.objectContaining({
- data: {
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'],
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText.doGenerate',
- 'vercel.ai.pipeline.name': 'generateText.doGenerate',
- 'vercel.ai.response.finishReason': 'tool-calls',
- 'vercel.ai.response.id': expect.any(String),
- 'vercel.ai.response.model': 'mock-model-id',
- 'vercel.ai.response.timestamp': expect.any(String),
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.streaming': false,
- },
- description: 'generate_content mock-model-id',
- op: 'gen_ai.generate_content',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- expect.objectContaining({
- data: {
- [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1',
- [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather',
- [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function',
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- 'vercel.ai.operationId': 'ai.toolCall',
- },
- description: 'execute_tool getWeather',
- op: 'gen_ai.execute_tool',
- origin: 'auto.vercelai.otel',
- status: 'internal_error',
- }),
- ]),
-
- tags: {
- 'test-tag': 'test-value',
- },
- };
-
let traceId: string = 'unset-trace-id';
let spanId: string = 'unset-span-id';
- const expectedError = {
- contexts: {
- trace: {
- span_id: expect.any(String),
- trace_id: expect.any(String),
- },
- },
- exception: {
- values: expect.arrayContaining([
- expect.objectContaining({
- type: 'AI_ToolExecutionError',
- value: 'Error executing tool getWeather: Error in tool',
- }),
- ]),
- },
- tags: {
- 'test-tag': 'test-value',
- },
- };
-
await createRunner()
.expect({
transaction: transaction => {
- expect(transaction).toMatchObject(expectedTransaction);
+ expect(transaction.transaction).toBe('main');
+        // With streamGenAiSpans enabled, gen_ai spans are sent as standalone span items, so the transaction carries none
+ expect(transaction.spans).toEqual([]);
traceId = transaction.contexts!.trace!.trace_id;
spanId = transaction.contexts!.trace!.span_id;
},
})
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(3);
+ const invokeAgentSpan = container.items.find(
+ span => span.name === 'invoke_agent' && span.status === 'error',
+ );
+ expect(invokeAgentSpan).toBeDefined();
+ expect(invokeAgentSpan!.name).toBe('invoke_agent');
+ expect(invokeAgentSpan!.status).toBe('error');
+ expect(invokeAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+ expect(invokeAgentSpan!.attributes['vercel.ai.operationId'].value).toBe('ai.generateText');
+
+ const generateContentSpan = container.items.find(span => span.name === 'generate_content mock-model-id');
+ expect(generateContentSpan).toBeDefined();
+ expect(generateContentSpan!.name).toBe('generate_content mock-model-id');
+ expect(generateContentSpan!.status).toBe('ok');
+ expect(generateContentSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
+ expect(generateContentSpan!.attributes['vercel.ai.operationId'].value).toBe('ai.generateText.doGenerate');
+
+ const toolSpan = container.items.find(span => span.name === 'execute_tool getWeather');
+ expect(toolSpan).toBeDefined();
+ expect(toolSpan!.name).toBe('execute_tool getWeather');
+ expect(toolSpan!.status).toBe('error');
+ expect(toolSpan!.attributes['sentry.op'].value).toBe('gen_ai.execute_tool');
+ expect(toolSpan!.attributes[GEN_AI_TOOL_NAME_ATTRIBUTE].value).toBe('getWeather');
+ },
+ })
.expect({
event: event => {
- expect(event).toMatchObject(expectedError);
+ expect(event.exception?.values).toEqual(
+ expect.arrayContaining([
+ expect.objectContaining({
+ type: 'AI_ToolExecutionError',
+ value: 'Error executing tool getWeather: Error in tool',
+ }),
+ ]),
+ );
+ expect(event.tags).toMatchObject({ 'test-tag': 'test-value' });
expect(event.contexts!.trace!.trace_id).toBe(traceId);
expect(event.contexts!.trace!.span_id).toBe(spanId);
},
@@ -612,101 +317,6 @@ describe('Vercel AI integration', () => {
createEsmAndCjsTests(__dirname, 'scenario-error-in-tool-express.mjs', 'instrument.mjs', (createRunner, test) => {
test('captures error in tool in express server', async () => {
- const expectedTransaction = {
- transaction: 'GET /test/error-in-tool',
- spans: expect.arrayContaining([
- expect.objectContaining({
- data: {
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText',
- 'vercel.ai.pipeline.name': 'generateText',
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.settings.maxSteps': 1,
- 'vercel.ai.streaming': false,
- },
- description: 'invoke_agent',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.vercelai.otel',
- status: 'internal_error',
- }),
- expect.objectContaining({
- data: {
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'],
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText.doGenerate',
- 'vercel.ai.pipeline.name': 'generateText.doGenerate',
- 'vercel.ai.response.finishReason': 'tool-calls',
- 'vercel.ai.response.id': expect.any(String),
- 'vercel.ai.response.model': 'mock-model-id',
- 'vercel.ai.response.timestamp': expect.any(String),
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.streaming': false,
- },
- description: 'generate_content mock-model-id',
- op: 'gen_ai.generate_content',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- expect.objectContaining({
- data: {
- [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1',
- [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather',
- [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function',
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- 'vercel.ai.operationId': 'ai.toolCall',
- },
- description: 'execute_tool getWeather',
- op: 'gen_ai.execute_tool',
- origin: 'auto.vercelai.otel',
- status: 'internal_error',
- }),
- ]),
-
- tags: {
- 'test-tag': 'test-value',
- },
- };
-
- const expectedError = {
- contexts: {
- trace: {
- span_id: expect.any(String),
- trace_id: expect.any(String),
- },
- },
- exception: {
- values: expect.arrayContaining([
- expect.objectContaining({
- type: 'AI_ToolExecutionError',
- value: 'Error executing tool getWeather: Error in tool',
- }),
- ]),
- },
- tags: {
- 'test-tag': 'test-value',
- },
- };
-
let transactionEvent: Event | undefined;
let errorEvent: Event | undefined;
@@ -716,6 +326,32 @@ describe('Vercel AI integration', () => {
transactionEvent = transaction;
},
})
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(3);
+ const invokeAgentSpan = container.items.find(
+ span => span.name === 'invoke_agent' && span.status === 'error',
+ );
+ expect(invokeAgentSpan).toBeDefined();
+ expect(invokeAgentSpan!.name).toBe('invoke_agent');
+ expect(invokeAgentSpan!.status).toBe('error');
+ expect(invokeAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+ expect(invokeAgentSpan!.attributes['vercel.ai.operationId'].value).toBe('ai.generateText');
+
+ const generateContentSpan = container.items.find(span => span.name === 'generate_content mock-model-id');
+ expect(generateContentSpan).toBeDefined();
+ expect(generateContentSpan!.name).toBe('generate_content mock-model-id');
+ expect(generateContentSpan!.status).toBe('ok');
+ expect(generateContentSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
+
+ const toolSpan = container.items.find(span => span.name === 'execute_tool getWeather');
+ expect(toolSpan).toBeDefined();
+ expect(toolSpan!.name).toBe('execute_tool getWeather');
+ expect(toolSpan!.status).toBe('error');
+ expect(toolSpan!.attributes['sentry.op'].value).toBe('gen_ai.execute_tool');
+ expect(toolSpan!.attributes[GEN_AI_TOOL_NAME_ATTRIBUTE].value).toBe('getWeather');
+ },
+ })
.expect({
event: event => {
errorEvent = event;
@@ -727,11 +363,19 @@ describe('Vercel AI integration', () => {
await runner.completed();
expect(transactionEvent).toBeDefined();
- expect(errorEvent).toBeDefined();
-
- expect(transactionEvent).toMatchObject(expectedTransaction);
+ expect(transactionEvent!.transaction).toBe('GET /test/error-in-tool');
+ expect(transactionEvent!.tags).toMatchObject({ 'test-tag': 'test-value' });
- expect(errorEvent).toMatchObject(expectedError);
+ expect(errorEvent).toBeDefined();
+ expect(errorEvent!.exception?.values).toEqual(
+ expect.arrayContaining([
+ expect.objectContaining({
+ type: 'AI_ToolExecutionError',
+ value: 'Error executing tool getWeather: Error in tool',
+ }),
+ ]),
+ );
+ expect(errorEvent!.tags).toMatchObject({ 'test-tag': 'test-value' });
expect(errorEvent!.contexts!.trace!.trace_id).toBe(transactionEvent!.contexts!.trace!.trace_id);
expect(errorEvent!.contexts!.trace!.span_id).toBe(transactionEvent!.contexts!.trace!.span_id);
});
@@ -739,37 +383,30 @@ describe('Vercel AI integration', () => {
createEsmAndCjsTests(__dirname, 'scenario-late-model-id.mjs', 'instrument.mjs', (createRunner, test) => {
test('sets op correctly even when model ID is not available at span start', async () => {
- const expectedTransaction = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // The generateText span should have the correct op even though model ID was not available at span start
- expect.objectContaining({
- description: 'invoke_agent',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- data: expect.objectContaining({
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
- }),
- }),
- // The doGenerate span - name stays as 'generateText.doGenerate' since model ID is missing
- expect.objectContaining({
- description: 'generateText.doGenerate',
- op: 'gen_ai.generate_content',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- data: expect.objectContaining({
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- }),
- }),
- ]),
- };
+ await createRunner()
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(2);
+ const invokeAgentSpan = container.items.find(span => span.name === 'invoke_agent');
+ expect(invokeAgentSpan).toBeDefined();
+ expect(invokeAgentSpan!.name).toBe('invoke_agent');
+ expect(invokeAgentSpan!.status).toBe('ok');
+ expect(invokeAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+ expect(invokeAgentSpan!.attributes['sentry.origin'].value).toBe('auto.vercelai.otel');
+ expect(invokeAgentSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('invoke_agent');
- await createRunner().expect({ transaction: expectedTransaction }).start().completed();
+ const generateContentSpan = container.items.find(span => span.name === 'generateText.doGenerate');
+ expect(generateContentSpan).toBeDefined();
+ expect(generateContentSpan!.name).toBe('generateText.doGenerate');
+ expect(generateContentSpan!.status).toBe('ok');
+ expect(generateContentSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
+ expect(generateContentSpan!.attributes['sentry.origin'].value).toBe('auto.vercelai.otel');
+ expect(generateContentSpan!.attributes[GEN_AI_OPERATION_NAME_ATTRIBUTE].value).toBe('generate_content');
+ },
+ })
+ .start()
+ .completed();
});
});
@@ -781,18 +418,22 @@ describe('Vercel AI integration', () => {
test('extracts system instructions from messages', async () => {
await createRunner()
.ignore('event')
+ .expect({ transaction: { transaction: 'main' } })
.expect({
- transaction: {
- transaction: 'main',
- spans: expect.arrayContaining([
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE]: JSON.stringify([
- { type: 'text', content: 'You are a helpful assistant' },
- ]),
- }),
- }),
- ]),
+ span: container => {
+ expect(container.items).toHaveLength(2);
+ const invokeAgentSpan = container.items.find(span => span.name === 'invoke_agent');
+ expect(invokeAgentSpan).toBeDefined();
+ expect(invokeAgentSpan!.name).toBe('invoke_agent');
+ expect(invokeAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+ expect(invokeAgentSpan!.attributes[GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE].value).toBe(
+ JSON.stringify([{ type: 'text', content: 'You are a helpful assistant' }]),
+ );
+
+ const generateContentSpan = container.items.find(span => span.name === 'generate_content mock-model-id');
+ expect(generateContentSpan).toBeDefined();
+ expect(generateContentSpan!.name).toBe('generate_content mock-model-id');
+ expect(generateContentSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
},
})
.start()
@@ -809,27 +450,41 @@ describe('Vercel AI integration', () => {
test('truncates messages when they exceed byte limit', async () => {
await createRunner()
.ignore('event')
+ .expect({ transaction: { transaction: 'main' } })
.expect({
- transaction: {
- transaction: 'main',
- spans: expect.arrayContaining([
- // First call: Last message truncated (only C's remain, D's are cropped)
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 3,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[.*"(?:text|content)":"C+".*\]$/),
- }),
- }),
- // Second call: Last message is small and kept intact
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 3,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringContaining(
- 'This is a small message that fits within the limit',
- ),
- }),
- }),
- ]),
+ span: container => {
+ expect(container.items).toHaveLength(4);
+ const truncatedInvokeAgentSpan = container.items.find(
+ span =>
+ span.name === 'invoke_agent' &&
+ span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]?.value?.match(/^\[.*"(?:text|content)":"C+".*\]$/),
+ );
+ expect(truncatedInvokeAgentSpan).toBeDefined();
+ expect(truncatedInvokeAgentSpan!.name).toBe('invoke_agent');
+ expect(truncatedInvokeAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+ expect(truncatedInvokeAgentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE].value).toBe(
+ 3,
+ );
+ expect(truncatedInvokeAgentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE].value).toMatch(
+ /^\[.*"(?:text|content)":"C+".*\]$/,
+ );
+
+ const smallMessageInvokeAgentSpan = container.items.find(
+ span =>
+ span.name === 'invoke_agent' &&
+ span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]?.value?.includes(
+ 'This is a small message that fits within the limit',
+ ),
+ );
+ expect(smallMessageInvokeAgentSpan).toBeDefined();
+ expect(smallMessageInvokeAgentSpan!.name).toBe('invoke_agent');
+ expect(smallMessageInvokeAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+ expect(
+ smallMessageInvokeAgentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE].value,
+ ).toBe(3);
+ expect(smallMessageInvokeAgentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE].value).toContain(
+ 'This is a small message that fits within the limit',
+ );
},
})
.start()
@@ -840,110 +495,87 @@ describe('Vercel AI integration', () => {
createEsmAndCjsTests(__dirname, 'scenario-embeddings.mjs', 'instrument.mjs', (createRunner, test) => {
test('creates embedding related spans with sendDefaultPii: false', async () => {
- const expectedTransaction = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // embed doEmbed span
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 10,
- }),
- description: 'embeddings mock-model-id',
- op: 'gen_ai.embeddings',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // embedMany doEmbed span
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 20,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
- }),
- description: 'embeddings mock-model-id',
- op: 'gen_ai.embeddings',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- ]),
- };
+ await createRunner()
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(2);
+ const embedSpan = container.items.find(
+ span => span.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]?.value === 10,
+ );
+ expect(embedSpan).toBeDefined();
+ expect(embedSpan!.name).toBe('embeddings mock-model-id');
+ expect(embedSpan!.status).toBe('ok');
+ expect(embedSpan!.attributes['sentry.op'].value).toBe('gen_ai.embeddings');
+ expect(embedSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('mock-model-id');
+ expect(embedSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(10);
- await createRunner().expect({ transaction: expectedTransaction }).start().completed();
+ const embedManySpan = container.items.find(
+ span => span.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]?.value === 20,
+ );
+ expect(embedManySpan).toBeDefined();
+ expect(embedManySpan!.name).toBe('embeddings mock-model-id');
+ expect(embedManySpan!.status).toBe('ok');
+ expect(embedManySpan!.attributes['sentry.op'].value).toBe('gen_ai.embeddings');
+ expect(embedManySpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(20);
+ },
+ })
+ .start()
+ .completed();
});
});
createEsmAndCjsTests(__dirname, 'scenario-embeddings.mjs', 'instrument-with-pii.mjs', (createRunner, test) => {
test('creates embedding related spans with sendDefaultPii: true', async () => {
- const expectedTransaction = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // embed doEmbed span with input
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]: 'Embedding test!',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 10,
- }),
- description: 'embeddings mock-model-id',
- op: 'gen_ai.embeddings',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // embedMany doEmbed span with input
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.embeddings',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]: '["First input","Second input"]',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 20,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 20,
- }),
- description: 'embeddings mock-model-id',
- op: 'gen_ai.embeddings',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- ]),
- };
+ await createRunner()
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(2);
+ const embedSpan = container.items.find(
+ span => span.attributes[GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]?.value === 'Embedding test!',
+ );
+ expect(embedSpan).toBeDefined();
+ expect(embedSpan!.name).toBe('embeddings mock-model-id');
+ expect(embedSpan!.status).toBe('ok');
+ expect(embedSpan!.attributes['sentry.op'].value).toBe('gen_ai.embeddings');
+ expect(embedSpan!.attributes[GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE].value).toBe('Embedding test!');
- await createRunner().expect({ transaction: expectedTransaction }).start().completed();
+ const embedManySpan = container.items.find(
+ span => span.attributes[GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE]?.value === '["First input","Second input"]',
+ );
+ expect(embedManySpan).toBeDefined();
+ expect(embedManySpan!.name).toBe('embeddings mock-model-id');
+ expect(embedManySpan!.status).toBe('ok');
+ expect(embedManySpan!.attributes['sentry.op'].value).toBe('gen_ai.embeddings');
+ expect(embedManySpan!.attributes[GEN_AI_EMBEDDINGS_INPUT_ATTRIBUTE].value).toBe(
+ '["First input","Second input"]',
+ );
+ },
+ })
+ .start()
+ .completed();
});
});
createEsmAndCjsTests(__dirname, 'scenario-conversation-id.mjs', 'instrument.mjs', (createRunner, test) => {
test('does not overwrite conversation id set via Sentry.setConversationId with responseId from provider metadata', async () => {
await createRunner()
+ .expect({ transaction: { transaction: 'main' } })
.expect({
- transaction: {
- transaction: 'main',
- spans: expect.arrayContaining([
- expect.objectContaining({
- op: 'gen_ai.invoke_agent',
- data: expect.objectContaining({
- 'gen_ai.conversation.id': 'conv-a',
- }),
- }),
- expect.objectContaining({
- op: 'gen_ai.generate_content',
- data: expect.objectContaining({
- 'gen_ai.conversation.id': 'conv-a',
- }),
- }),
- ]),
+ span: container => {
+ expect(container.items).toHaveLength(2);
+ const invokeAgentSpan = container.items.find(span => span.name === 'invoke_agent');
+ expect(invokeAgentSpan).toBeDefined();
+ expect(invokeAgentSpan!.name).toBe('invoke_agent');
+ expect(invokeAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+ expect(invokeAgentSpan!.attributes['gen_ai.conversation.id'].value).toBe('conv-a');
+
+ const generateContentSpan = container.items.find(span => span.name === 'generate_content mock-model-id');
+ expect(generateContentSpan).toBeDefined();
+ expect(generateContentSpan!.name).toBe('generate_content mock-model-id');
+ expect(generateContentSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
+ expect(generateContentSpan!.attributes['gen_ai.conversation.id'].value).toBe('conv-a');
},
})
.start()
@@ -960,22 +592,36 @@ describe('Vercel AI integration', () => {
(createRunner, test) => {
test('does not truncate input messages when enableTruncation is false', async () => {
await createRunner()
+ .expect({ transaction: { transaction: 'main' } })
.expect({
- transaction: {
- transaction: 'main',
- spans: expect.arrayContaining([
- // Multiple messages should all be preserved (no popping to last message only)
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: JSON.stringify([
+ span: container => {
+ expect(container.items).toHaveLength(2);
+ const invokeAgentSpan = container.items.find(
+ span =>
+ span.name === 'invoke_agent' &&
+ span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]?.value ===
+ JSON.stringify([
{ role: 'user', content: longContent },
{ role: 'assistant', content: 'Some reply' },
{ role: 'user', content: 'Follow-up question' },
]),
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 3,
- }),
- }),
- ]),
+ );
+ expect(invokeAgentSpan).toBeDefined();
+ expect(invokeAgentSpan!.name).toBe('invoke_agent');
+ expect(invokeAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+ expect(invokeAgentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE].value).toBe(
+ JSON.stringify([
+ { role: 'user', content: longContent },
+ { role: 'assistant', content: 'Some reply' },
+ { role: 'user', content: 'Follow-up question' },
+ ]),
+ );
+ expect(invokeAgentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE].value).toBe(3);
+
+ const generateContentSpan = container.items.find(span => span.name === 'generate_content mock-model-id');
+ expect(generateContentSpan).toBeDefined();
+ expect(generateContentSpan!.name).toBe('generate_content mock-model-id');
+ expect(generateContentSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
},
})
.start()
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/instrument-with-pii.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/instrument-with-pii.mjs
index b798e21228f5..ddc247cc2d41 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/instrument-with-pii.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/instrument-with-pii.mjs
@@ -8,4 +8,5 @@ Sentry.init({
sendDefaultPii: true,
transport: loggingTransport,
integrations: [Sentry.vercelAIIntegration()],
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/instrument.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/instrument.mjs
index 5e898ee1949d..a76d206a0b61 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/instrument.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/instrument.mjs
@@ -7,4 +7,5 @@ Sentry.init({
tracesSampleRate: 1.0,
transport: loggingTransport,
integrations: [Sentry.vercelAIIntegration()],
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts
index e59a5545d7cf..515f6cdc8c6e 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts
@@ -1,15 +1,10 @@
-import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
import type { Event } from '@sentry/node';
import { afterAll, describe, expect } from 'vitest';
import {
GEN_AI_INPUT_MESSAGES_ATTRIBUTE,
- GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE,
- GEN_AI_OPERATION_NAME_ATTRIBUTE,
GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE,
GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE,
GEN_AI_REQUEST_MODEL_ATTRIBUTE,
- GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE,
- GEN_AI_RESPONSE_ID_ATTRIBUTE,
GEN_AI_RESPONSE_MODEL_ATTRIBUTE,
GEN_AI_SYSTEM_ATTRIBUTE,
GEN_AI_TOOL_CALL_ID_ATTRIBUTE,
@@ -29,415 +24,109 @@ describe('Vercel AI integration (V5)', () => {
cleanupChildProcesses();
});
- const EXPECTED_TRANSACTION_DEFAULT_PII_FALSE = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // First span - no telemetry config, should enable telemetry but not record inputs/outputs when sendDefaultPii: false
- expect.objectContaining({
- data: {
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText',
- 'vercel.ai.pipeline.name': 'generateText',
- 'vercel.ai.response.finishReason': 'stop',
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.streaming': false,
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- },
- description: 'invoke_agent',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // Second span - explicitly enabled telemetry but recordInputs/recordOutputs not set, should not record when sendDefaultPii: false
- expect.objectContaining({
- data: {
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- 'vercel.ai.operationId': 'ai.generateText.doGenerate',
- 'vercel.ai.model.provider': 'mock-provider',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- 'vercel.ai.settings.maxRetries': 2,
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
- 'vercel.ai.pipeline.name': 'generateText.doGenerate',
- 'vercel.ai.streaming': false,
- 'vercel.ai.response.finishReason': 'stop',
- 'vercel.ai.response.model': 'mock-model-id',
- 'vercel.ai.response.id': expect.any(String),
- 'vercel.ai.response.timestamp': expect.any(String),
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'],
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
- },
- description: 'generate_content mock-model-id',
- op: 'gen_ai.generate_content',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // Third span - explicit telemetry enabled, should record inputs/outputs regardless of sendDefaultPii
- expect.objectContaining({
- data: {
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText',
- 'vercel.ai.pipeline.name': 'generateText',
- 'vercel.ai.prompt': '[{"role":"user","content":"Where is the second span?"}]',
- 'vercel.ai.response.finishReason': 'stop',
- [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
- '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]',
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.streaming': false,
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]',
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- },
- description: 'invoke_agent',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // Fourth span - doGenerate for explicit telemetry enabled call
- expect.objectContaining({
- data: {
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- 'vercel.ai.operationId': 'ai.generateText.doGenerate',
- 'vercel.ai.model.provider': 'mock-provider',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- 'vercel.ai.settings.maxRetries': 2,
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
- 'vercel.ai.pipeline.name': 'generateText.doGenerate',
- 'vercel.ai.streaming': false,
- 'vercel.ai.response.finishReason': 'stop',
- 'vercel.ai.response.model': 'mock-model-id',
- 'vercel.ai.response.id': expect.any(String),
- [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
- '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]',
- 'vercel.ai.response.timestamp': expect.any(String),
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'],
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
- },
- description: 'generate_content mock-model-id',
- op: 'gen_ai.generate_content',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // Fifth span - tool call generateText span
- expect.objectContaining({
- data: {
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText',
- 'vercel.ai.pipeline.name': 'generateText',
- 'vercel.ai.response.finishReason': 'tool-calls',
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.streaming': false,
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- },
- description: 'invoke_agent',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // Sixth span - tool call doGenerate span
- expect.objectContaining({
- data: {
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText.doGenerate',
- 'vercel.ai.pipeline.name': 'generateText.doGenerate',
- 'vercel.ai.response.finishReason': 'tool-calls',
- 'vercel.ai.response.id': expect.any(String),
- 'vercel.ai.response.model': 'mock-model-id',
- 'vercel.ai.response.timestamp': expect.any(String),
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.streaming': false,
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'],
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- },
- description: 'generate_content mock-model-id',
- op: 'gen_ai.generate_content',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // Seventh span - tool call execution span
- // Note: gen_ai.tool.description is NOT present when sendDefaultPii: false because ai.prompt.tools is not recorded
- expect.objectContaining({
- data: {
- 'vercel.ai.operationId': 'ai.toolCall',
- [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1',
- [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather',
- [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function',
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- },
- description: 'execute_tool getWeather',
- op: 'gen_ai.execute_tool',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- ]),
- };
-
- const EXPECTED_AVAILABLE_TOOLS_JSON =
- '[{"type":"function","name":"getWeather","description":"Get the current weather for a location","inputSchema":{"$schema":"http://json-schema.org/draft-07/schema#","type":"object","properties":{"location":{"type":"string"}},"required":["location"],"additionalProperties":false}}]';
-
- const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // First span - no telemetry config, should enable telemetry AND record inputs/outputs when sendDefaultPii: true
- expect.objectContaining({
- data: {
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText',
- 'vercel.ai.pipeline.name': 'generateText',
- 'vercel.ai.prompt': '[{"role":"user","content":"Where is the first span?"}]',
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the first span?"}]',
- [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
- '[{"role":"assistant","parts":[{"type":"text","content":"First span here!"}],"finish_reason":"stop"}]',
- 'vercel.ai.response.finishReason': 'stop',
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.streaming': false,
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- },
- description: 'invoke_agent',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // Second span - doGenerate for first call, should also include input/output fields when sendDefaultPii: true
- expect.objectContaining({
- data: {
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText.doGenerate',
- 'vercel.ai.pipeline.name': 'generateText.doGenerate',
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]:
- '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]',
- [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
- '[{"role":"assistant","parts":[{"type":"text","content":"First span here!"}],"finish_reason":"stop"}]',
- 'vercel.ai.response.finishReason': 'stop',
- 'vercel.ai.response.id': expect.any(String),
- 'vercel.ai.response.model': 'mock-model-id',
- 'vercel.ai.response.timestamp': expect.any(String),
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.streaming': false,
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'],
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- },
- description: 'generate_content mock-model-id',
- op: 'gen_ai.generate_content',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // Third span - explicitly enabled telemetry, should record inputs/outputs regardless of sendDefaultPii
- expect.objectContaining({
- data: {
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText',
- 'vercel.ai.pipeline.name': 'generateText',
- 'vercel.ai.prompt': '[{"role":"user","content":"Where is the second span?"}]',
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]',
- [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
- '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]',
- 'vercel.ai.response.finishReason': 'stop',
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.streaming': false,
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- },
- description: 'invoke_agent',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // Fourth span - doGenerate for explicitly enabled telemetry call
- expect.objectContaining({
- data: {
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- 'vercel.ai.operationId': 'ai.generateText.doGenerate',
- 'vercel.ai.model.provider': 'mock-provider',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- 'vercel.ai.settings.maxRetries': 2,
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
- 'vercel.ai.pipeline.name': 'generateText.doGenerate',
- 'vercel.ai.streaming': false,
- 'vercel.ai.response.finishReason': 'stop',
- 'vercel.ai.response.model': 'mock-model-id',
- 'vercel.ai.response.id': expect.any(String),
- [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
- '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]',
- 'vercel.ai.response.timestamp': expect.any(String),
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'],
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
- },
- description: 'generate_content mock-model-id',
- op: 'gen_ai.generate_content',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // Fifth span - tool call generateText span (should include prompts when sendDefaultPii: true)
- expect.objectContaining({
- data: {
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText',
- 'vercel.ai.pipeline.name': 'generateText',
- 'vercel.ai.prompt': '[{"role":"user","content":"What is the weather in San Francisco?"}]',
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in San Francisco?"}]',
- [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
- '[{"role":"assistant","parts":[{"type":"tool_call","id":"call-1","name":"getWeather","arguments":"{\\"location\\":\\"San Francisco\\"}"}],"finish_reason":"tool_call"}]',
- 'vercel.ai.response.finishReason': 'tool-calls',
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.streaming': false,
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- },
- description: 'invoke_agent',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // Sixth span - tool call doGenerate span (should include prompts when sendDefaultPii: true)
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText.doGenerate',
- 'vercel.ai.pipeline.name': 'generateText.doGenerate',
- [GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String),
- [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
- '[{"role":"assistant","parts":[{"type":"tool_call","id":"call-1","name":"getWeather","arguments":"{\\"location\\":\\"San Francisco\\"}"}],"finish_reason":"tool_call"}]',
- 'vercel.ai.prompt.toolChoice': expect.any(String),
- [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: EXPECTED_AVAILABLE_TOOLS_JSON,
- 'vercel.ai.response.finishReason': 'tool-calls',
- 'vercel.ai.response.id': expect.any(String),
- 'vercel.ai.response.model': 'mock-model-id',
- 'vercel.ai.response.timestamp': expect.any(String),
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.streaming': false,
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'],
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- }),
- description: 'generate_content mock-model-id',
- op: 'gen_ai.generate_content',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // Seventh span - tool call execution span
- expect.objectContaining({
- data: expect.objectContaining({
- 'vercel.ai.operationId': 'ai.toolCall',
- [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1',
- [GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE]: 'Get the current weather for a location',
- [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather',
- [GEN_AI_TOOL_INPUT_ATTRIBUTE]: expect.any(String),
- [GEN_AI_TOOL_OUTPUT_ATTRIBUTE]: expect.any(String),
- [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function',
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- }),
- description: 'execute_tool getWeather',
- op: 'gen_ai.execute_tool',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- ]),
- };
-
createEsmAndCjsTests(
__dirname,
'scenario.mjs',
'instrument.mjs',
(createRunner, test) => {
test('creates ai related spans with sendDefaultPii: false', async () => {
- await createRunner().expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_FALSE }).start().completed();
+ await createRunner()
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(7);
+ const firstInvokeAgentSpan = container.items.find(
+ span =>
+ span.name === 'invoke_agent' &&
+ span.attributes['vercel.ai.operationId'].value === 'ai.generateText' &&
+ span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE] === undefined &&
+ span.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value === 10,
+ );
+ expect(firstInvokeAgentSpan).toBeDefined();
+ expect(firstInvokeAgentSpan!.name).toBe('invoke_agent');
+ expect(firstInvokeAgentSpan!.status).toBe('ok');
+ expect(firstInvokeAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+ expect(firstInvokeAgentSpan!.attributes['vercel.ai.operationId'].value).toBe('ai.generateText');
+ expect(firstInvokeAgentSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('mock-model-id');
+ expect(firstInvokeAgentSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE].value).toBe('mock-model-id');
+ expect(firstInvokeAgentSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(10);
+ expect(firstInvokeAgentSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(20);
+ expect(firstInvokeAgentSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE].value).toBe(30);
+ expect(firstInvokeAgentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toBeUndefined();
+
+ const firstGenerateContentSpan = container.items.find(
+ span =>
+ span.name === 'generate_content mock-model-id' &&
+ span.attributes['vercel.ai.operationId'].value === 'ai.generateText.doGenerate' &&
+ span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE] === undefined,
+ );
+ expect(firstGenerateContentSpan).toBeDefined();
+ expect(firstGenerateContentSpan!.name).toBe('generate_content mock-model-id');
+ expect(firstGenerateContentSpan!.status).toBe('ok');
+ expect(firstGenerateContentSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
+ expect(firstGenerateContentSpan!.attributes['vercel.ai.operationId'].value).toBe(
+ 'ai.generateText.doGenerate',
+ );
+ expect(firstGenerateContentSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('mock-provider');
+ expect(firstGenerateContentSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(10);
+ expect(firstGenerateContentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toBeUndefined();
+
+ const secondInvokeAgentSpan = container.items.find(
+ span =>
+ span.name === 'invoke_agent' &&
+ span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]?.value ===
+ '[{"role":"user","content":"Where is the second span?"}]',
+ );
+ expect(secondInvokeAgentSpan).toBeDefined();
+ expect(secondInvokeAgentSpan!.name).toBe('invoke_agent');
+ expect(secondInvokeAgentSpan!.status).toBe('ok');
+ expect(secondInvokeAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+ expect(secondInvokeAgentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE].value).toBe(
+ '[{"role":"user","content":"Where is the second span?"}]',
+ );
+ expect(secondInvokeAgentSpan!.attributes[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE].value).toBe(
+ '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]',
+ );
+
+ const secondGenerateContentSpan = container.items.find(
+ span =>
+ span.name === 'generate_content mock-model-id' &&
+ span.attributes[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]?.value?.includes('Second span here!'),
+ );
+ expect(secondGenerateContentSpan).toBeDefined();
+ expect(secondGenerateContentSpan!.name).toBe('generate_content mock-model-id');
+ expect(secondGenerateContentSpan!.status).toBe('ok');
+ expect(secondGenerateContentSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
+
+ const toolInvokeAgentSpan = container.items.find(
+ span =>
+ span.name === 'invoke_agent' && span.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]?.value === 15,
+ );
+ expect(toolInvokeAgentSpan).toBeDefined();
+ expect(toolInvokeAgentSpan!.name).toBe('invoke_agent');
+ expect(toolInvokeAgentSpan!.status).toBe('ok');
+
+ const toolGenerateContentSpan = container.items.find(
+ span =>
+ span.name === 'generate_content mock-model-id' &&
+ span.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]?.value === 15,
+ );
+ expect(toolGenerateContentSpan).toBeDefined();
+ expect(toolGenerateContentSpan!.name).toBe('generate_content mock-model-id');
+ expect(toolGenerateContentSpan!.status).toBe('ok');
+
+ const toolExecutionSpan = container.items.find(span => span.name === 'execute_tool getWeather');
+ expect(toolExecutionSpan).toBeDefined();
+ expect(toolExecutionSpan!.name).toBe('execute_tool getWeather');
+ expect(toolExecutionSpan!.status).toBe('ok');
+ expect(toolExecutionSpan!.attributes['sentry.op'].value).toBe('gen_ai.execute_tool');
+ expect(toolExecutionSpan!.attributes[GEN_AI_TOOL_NAME_ATTRIBUTE].value).toBe('getWeather');
+ expect(toolExecutionSpan!.attributes[GEN_AI_TOOL_CALL_ID_ATTRIBUTE].value).toBe('call-1');
+ expect(toolExecutionSpan!.attributes[GEN_AI_TOOL_TYPE_ATTRIBUTE].value).toBe('function');
+ },
+ })
+ .start()
+ .completed();
});
},
{
@@ -453,7 +142,110 @@ describe('Vercel AI integration (V5)', () => {
'instrument-with-pii.mjs',
(createRunner, test) => {
test('creates ai related spans with sendDefaultPii: true', async () => {
- await createRunner().expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_TRUE }).start().completed();
+ await createRunner()
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(7);
+ const firstInvokeAgentSpan = container.items.find(
+ span =>
+ span.name === 'invoke_agent' &&
+ span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]?.value ===
+ '[{"role":"user","content":"Where is the first span?"}]',
+ );
+ expect(firstInvokeAgentSpan).toBeDefined();
+ expect(firstInvokeAgentSpan!.name).toBe('invoke_agent');
+ expect(firstInvokeAgentSpan!.status).toBe('ok');
+ expect(firstInvokeAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+ expect(firstInvokeAgentSpan!.attributes['vercel.ai.operationId'].value).toBe('ai.generateText');
+ expect(firstInvokeAgentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE].value).toBe(
+ '[{"role":"user","content":"Where is the first span?"}]',
+ );
+ expect(firstInvokeAgentSpan!.attributes[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE].value).toBe(
+ '[{"role":"assistant","parts":[{"type":"text","content":"First span here!"}],"finish_reason":"stop"}]',
+ );
+
+ const firstGenerateContentSpan = container.items.find(
+ span =>
+ span.name === 'generate_content mock-model-id' &&
+ span.attributes[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]?.value?.includes('First span here!'),
+ );
+ expect(firstGenerateContentSpan).toBeDefined();
+ expect(firstGenerateContentSpan!.name).toBe('generate_content mock-model-id');
+ expect(firstGenerateContentSpan!.status).toBe('ok');
+ expect(firstGenerateContentSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
+ expect(firstGenerateContentSpan!.attributes['vercel.ai.operationId'].value).toBe(
+ 'ai.generateText.doGenerate',
+ );
+ expect(firstGenerateContentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toBeDefined();
+ expect(firstGenerateContentSpan!.attributes[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE].value).toContain(
+ 'First span here!',
+ );
+
+ const secondInvokeAgentSpan = container.items.find(
+ span =>
+ span.name === 'invoke_agent' &&
+ span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]?.value ===
+ '[{"role":"user","content":"Where is the second span?"}]',
+ );
+ expect(secondInvokeAgentSpan).toBeDefined();
+ expect(secondInvokeAgentSpan!.name).toBe('invoke_agent');
+ expect(secondInvokeAgentSpan!.status).toBe('ok');
+ expect(secondInvokeAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+ expect(secondInvokeAgentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE].value).toBe(
+ '[{"role":"user","content":"Where is the second span?"}]',
+ );
+
+ const secondGenerateContentSpan = container.items.find(
+ span =>
+ span.name === 'generate_content mock-model-id' &&
+ span.attributes[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]?.value?.includes('Second span here!'),
+ );
+ expect(secondGenerateContentSpan).toBeDefined();
+ expect(secondGenerateContentSpan!.name).toBe('generate_content mock-model-id');
+ expect(secondGenerateContentSpan!.status).toBe('ok');
+ expect(secondGenerateContentSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
+
+ const toolInvokeAgentSpan = container.items.find(
+ span =>
+ span.name === 'invoke_agent' &&
+ span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]?.value ===
+ '[{"role":"user","content":"What is the weather in San Francisco?"}]',
+ );
+ expect(toolInvokeAgentSpan).toBeDefined();
+ expect(toolInvokeAgentSpan!.name).toBe('invoke_agent');
+ expect(toolInvokeAgentSpan!.status).toBe('ok');
+ expect(toolInvokeAgentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE].value).toBe(
+ '[{"role":"user","content":"What is the weather in San Francisco?"}]',
+ );
+
+ const toolGenerateContentSpan = container.items.find(
+ span =>
+ span.name === 'generate_content mock-model-id' &&
+ span.attributes[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE] !== undefined,
+ );
+ expect(toolGenerateContentSpan).toBeDefined();
+ expect(toolGenerateContentSpan!.name).toBe('generate_content mock-model-id');
+ expect(toolGenerateContentSpan!.status).toBe('ok');
+ expect(toolGenerateContentSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
+ expect(toolGenerateContentSpan!.attributes[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]).toBeDefined();
+ expect(toolGenerateContentSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(15);
+
+ const toolExecutionSpan = container.items.find(span => span.name === 'execute_tool getWeather');
+ expect(toolExecutionSpan).toBeDefined();
+ expect(toolExecutionSpan!.name).toBe('execute_tool getWeather');
+ expect(toolExecutionSpan!.status).toBe('ok');
+ expect(toolExecutionSpan!.attributes['sentry.op'].value).toBe('gen_ai.execute_tool');
+ expect(toolExecutionSpan!.attributes[GEN_AI_TOOL_NAME_ATTRIBUTE].value).toBe('getWeather');
+ expect(toolExecutionSpan!.attributes[GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE].value).toBe(
+ 'Get the current weather for a location',
+ );
+ expect(toolExecutionSpan!.attributes[GEN_AI_TOOL_INPUT_ATTRIBUTE]).toBeDefined();
+ expect(toolExecutionSpan!.attributes[GEN_AI_TOOL_OUTPUT_ATTRIBUTE]).toBeDefined();
+ },
+ })
+ .start()
+ .completed();
});
},
{
@@ -469,84 +261,6 @@ describe('Vercel AI integration (V5)', () => {
'instrument.mjs',
(createRunner, test) => {
test('captures error in tool', async () => {
- const expectedTransaction = {
- transaction: 'main',
- spans: expect.arrayContaining([
- expect.objectContaining({
- data: {
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText',
- 'vercel.ai.pipeline.name': 'generateText',
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.streaming': false,
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- 'vercel.ai.response.finishReason': 'tool-calls',
- },
- description: 'invoke_agent',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.vercelai.otel',
- }),
- expect.objectContaining({
- data: {
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText.doGenerate',
- 'vercel.ai.pipeline.name': 'generateText.doGenerate',
- 'vercel.ai.response.finishReason': 'tool-calls',
- 'vercel.ai.response.id': expect.any(String),
- 'vercel.ai.response.model': 'mock-model-id',
- 'vercel.ai.response.timestamp': expect.any(String),
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.streaming': false,
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'],
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- },
- description: 'generate_content mock-model-id',
- op: 'gen_ai.generate_content',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- expect.objectContaining({
- data: {
- 'vercel.ai.operationId': 'ai.toolCall',
- [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1',
- [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather',
- [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function',
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- },
- description: 'execute_tool getWeather',
- op: 'gen_ai.execute_tool',
- origin: 'auto.vercelai.otel',
- status: 'internal_error',
- }),
- ]),
- };
-
- const expectedError = {
- level: 'error',
- tags: expect.objectContaining({
- 'vercel.ai.tool.name': 'getWeather',
- 'vercel.ai.tool.callId': 'call-1',
- }),
- };
-
let transactionEvent: Event | undefined;
let errorEvent: Event | undefined;
@@ -556,6 +270,28 @@ describe('Vercel AI integration (V5)', () => {
transactionEvent = transaction;
},
})
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(3);
+ const invokeAgentSpan = container.items.find(span => span.name === 'invoke_agent');
+ expect(invokeAgentSpan).toBeDefined();
+ expect(invokeAgentSpan!.name).toBe('invoke_agent');
+ expect(invokeAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+
+ const generateContentSpan = container.items.find(span => span.name === 'generate_content mock-model-id');
+ expect(generateContentSpan).toBeDefined();
+ expect(generateContentSpan!.name).toBe('generate_content mock-model-id');
+ expect(generateContentSpan!.status).toBe('ok');
+ expect(generateContentSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
+
+ const toolSpan = container.items.find(span => span.name === 'execute_tool getWeather');
+ expect(toolSpan).toBeDefined();
+ expect(toolSpan!.name).toBe('execute_tool getWeather');
+ expect(toolSpan!.status).toBe('error');
+ expect(toolSpan!.attributes['sentry.op'].value).toBe('gen_ai.execute_tool');
+ expect(toolSpan!.attributes[GEN_AI_TOOL_NAME_ATTRIBUTE].value).toBe('getWeather');
+ },
+ })
.expect({
event: event => {
errorEvent = event;
@@ -565,10 +301,16 @@ describe('Vercel AI integration (V5)', () => {
.completed();
expect(transactionEvent).toBeDefined();
- expect(transactionEvent).toMatchObject(expectedTransaction);
+ expect(transactionEvent!.transaction).toBe('main');
expect(errorEvent).toBeDefined();
- expect(errorEvent).toMatchObject(expectedError);
+ expect(errorEvent!.level).toBe('error');
+ expect(errorEvent!.tags).toEqual(
+ expect.objectContaining({
+ 'vercel.ai.tool.name': 'getWeather',
+ 'vercel.ai.tool.callId': 'call-1',
+ }),
+ );
// Trace id should be the same for the transaction and error event
expect(transactionEvent!.contexts!.trace!.trace_id).toBe(errorEvent!.contexts!.trace!.trace_id);
@@ -587,7 +329,29 @@ describe('Vercel AI integration (V5)', () => {
'instrument.mjs',
(createRunner, test) => {
test('creates ai related spans with v5', async () => {
- await createRunner().expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_FALSE }).start().completed();
+ await createRunner()
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(7);
+ const invokeAgentSpans = container.items.filter(
+ span => span.attributes['sentry.op'].value === 'gen_ai.invoke_agent',
+ );
+ expect(invokeAgentSpans).toHaveLength(3);
+
+ const generateContentSpans = container.items.filter(
+ span => span.attributes['sentry.op'].value === 'gen_ai.generate_content',
+ );
+ expect(generateContentSpans).toHaveLength(3);
+
+ const toolSpan = container.items.find(
+ span => span.attributes['sentry.op'].value === 'gen_ai.execute_tool',
+ );
+ expect(toolSpan).toBeDefined();
+ },
+ })
+ .start()
+ .completed();
});
},
{
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/instrument-with-pii.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/instrument-with-pii.mjs
index b798e21228f5..ddc247cc2d41 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/instrument-with-pii.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/instrument-with-pii.mjs
@@ -8,4 +8,5 @@ Sentry.init({
sendDefaultPii: true,
transport: loggingTransport,
integrations: [Sentry.vercelAIIntegration()],
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/instrument.mjs b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/instrument.mjs
index 5e898ee1949d..a76d206a0b61 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/instrument.mjs
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/instrument.mjs
@@ -7,4 +7,5 @@ Sentry.init({
tracesSampleRate: 1.0,
transport: loggingTransport,
integrations: [Sentry.vercelAIIntegration()],
+ streamGenAiSpans: true,
});
diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts
index 1b030804f8d2..037294a69b6b 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/v6/test.ts
@@ -1,14 +1,11 @@
-import { SEMANTIC_ATTRIBUTE_SENTRY_OP, SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '@sentry/core';
import type { Event } from '@sentry/node';
import { afterAll, describe, expect } from 'vitest';
import {
GEN_AI_INPUT_MESSAGES_ATTRIBUTE,
- GEN_AI_OPERATION_NAME_ATTRIBUTE,
GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE,
GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE,
GEN_AI_REQUEST_MODEL_ATTRIBUTE,
GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE,
- GEN_AI_RESPONSE_ID_ATTRIBUTE,
GEN_AI_RESPONSE_MODEL_ATTRIBUTE,
GEN_AI_SYSTEM_ATTRIBUTE,
GEN_AI_TOOL_CALL_ID_ATTRIBUTE,
@@ -28,419 +25,109 @@ describe('Vercel AI integration (V6)', () => {
cleanupChildProcesses();
});
- const EXPECTED_TRANSACTION_DEFAULT_PII_FALSE = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // First span - no telemetry config, should enable telemetry but not record inputs/outputs when sendDefaultPii: false
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText',
- 'vercel.ai.pipeline.name': 'generateText',
- 'vercel.ai.request.headers.user-agent': expect.any(String),
- 'vercel.ai.response.finishReason': 'stop',
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.streaming': false,
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- }),
- description: 'invoke_agent',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // Second span - explicitly enabled telemetry but recordInputs/recordOutputs not set, should not record when sendDefaultPii: false
- expect.objectContaining({
- data: expect.objectContaining({
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- 'vercel.ai.operationId': 'ai.generateText.doGenerate',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.request.headers.user-agent': expect.any(String),
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- 'vercel.ai.settings.maxRetries': 2,
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
- 'vercel.ai.pipeline.name': 'generateText.doGenerate',
- 'vercel.ai.streaming': false,
- 'vercel.ai.response.finishReason': 'stop',
- 'vercel.ai.response.model': 'mock-model-id',
- 'vercel.ai.response.id': expect.any(String),
- 'vercel.ai.response.timestamp': expect.any(String),
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'],
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
- }),
- description: 'generate_content mock-model-id',
- op: 'gen_ai.generate_content',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // Third span - explicit telemetry enabled, should record inputs/outputs regardless of sendDefaultPii
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText',
- 'vercel.ai.pipeline.name': 'generateText',
- 'vercel.ai.prompt': '[{"role":"user","content":"Where is the second span?"}]',
- 'vercel.ai.request.headers.user-agent': expect.any(String),
- 'vercel.ai.response.finishReason': 'stop',
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.streaming': false,
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]',
- [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
- '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]',
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- }),
- description: 'invoke_agent',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // Fourth span - doGenerate for explicit telemetry enabled call
- expect.objectContaining({
- data: expect.objectContaining({
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- 'vercel.ai.operationId': 'ai.generateText.doGenerate',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.request.headers.user-agent': expect.any(String),
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- 'vercel.ai.settings.maxRetries': 2,
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
- 'vercel.ai.pipeline.name': 'generateText.doGenerate',
- 'vercel.ai.streaming': false,
- 'vercel.ai.response.finishReason': 'stop',
- 'vercel.ai.response.model': 'mock-model-id',
- 'vercel.ai.response.id': expect.any(String),
- 'vercel.ai.response.timestamp': expect.any(String),
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String),
- [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
- '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'],
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
- }),
- description: 'generate_content mock-model-id',
- op: 'gen_ai.generate_content',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // Fifth span - tool call generateText span
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText',
- 'vercel.ai.pipeline.name': 'generateText',
- 'vercel.ai.request.headers.user-agent': expect.any(String),
- 'vercel.ai.response.finishReason': 'tool-calls',
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.streaming': false,
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- }),
- description: 'invoke_agent',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // Sixth span - tool call doGenerate span
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText.doGenerate',
- 'vercel.ai.pipeline.name': 'generateText.doGenerate',
- 'vercel.ai.request.headers.user-agent': expect.any(String),
- 'vercel.ai.response.finishReason': 'tool-calls',
- 'vercel.ai.response.id': expect.any(String),
- 'vercel.ai.response.model': 'mock-model-id',
- 'vercel.ai.response.timestamp': expect.any(String),
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.streaming': false,
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'],
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- }),
- description: 'generate_content mock-model-id',
- op: 'gen_ai.generate_content',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // Seventh span - tool call execution span
- // Note: gen_ai.tool.description is NOT present when sendDefaultPii: false because ai.prompt.tools is not recorded
- expect.objectContaining({
- data: expect.objectContaining({
- 'vercel.ai.operationId': 'ai.toolCall',
- [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1',
- [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather',
- [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function',
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- }),
- description: 'execute_tool getWeather',
- op: 'gen_ai.execute_tool',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- ]),
- };
-
- const EXPECTED_AVAILABLE_TOOLS_JSON =
- '[{"type":"function","name":"getWeather","description":"Get the current weather for a location","inputSchema":{"$schema":"http://json-schema.org/draft-07/schema#","type":"object","properties":{"location":{"type":"string"}},"required":["location"],"additionalProperties":false}}]';
-
- const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // First span - no telemetry config, should enable telemetry AND record inputs/outputs when sendDefaultPii: true
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText',
- 'vercel.ai.pipeline.name': 'generateText',
- 'vercel.ai.prompt': '[{"role":"user","content":"Where is the first span?"}]',
- 'vercel.ai.request.headers.user-agent': expect.any(String),
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the first span?"}]',
- [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
- '[{"role":"assistant","parts":[{"type":"text","content":"First span here!"}],"finish_reason":"stop"}]',
- 'vercel.ai.response.finishReason': 'stop',
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.streaming': false,
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- }),
- description: 'invoke_agent',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // Second span - doGenerate for first call, should also include input/output fields when sendDefaultPii: true
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText.doGenerate',
- 'vercel.ai.pipeline.name': 'generateText.doGenerate',
- 'vercel.ai.request.headers.user-agent': expect.any(String),
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]:
- '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]',
- [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
- '[{"role":"assistant","parts":[{"type":"text","content":"First span here!"}],"finish_reason":"stop"}]',
- 'vercel.ai.response.finishReason': 'stop',
- 'vercel.ai.response.id': expect.any(String),
- 'vercel.ai.response.model': 'mock-model-id',
- 'vercel.ai.response.timestamp': expect.any(String),
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.streaming': false,
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'],
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- }),
- description: 'generate_content mock-model-id',
- op: 'gen_ai.generate_content',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // Third span - explicitly enabled telemetry, should record inputs/outputs regardless of sendDefaultPii
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText',
- 'vercel.ai.pipeline.name': 'generateText',
- 'vercel.ai.prompt': '[{"role":"user","content":"Where is the second span?"}]',
- 'vercel.ai.request.headers.user-agent': expect.any(String),
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]',
- [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
- '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]',
- 'vercel.ai.response.finishReason': 'stop',
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.streaming': false,
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- }),
- description: 'invoke_agent',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // Fourth span - doGenerate for explicitly enabled telemetry call
- expect.objectContaining({
- data: expect.objectContaining({
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- 'vercel.ai.operationId': 'ai.generateText.doGenerate',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.request.headers.user-agent': expect.any(String),
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- 'vercel.ai.settings.maxRetries': 2,
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
- 'vercel.ai.pipeline.name': 'generateText.doGenerate',
- 'vercel.ai.streaming': false,
- 'vercel.ai.response.finishReason': 'stop',
- 'vercel.ai.response.model': 'mock-model-id',
- 'vercel.ai.response.id': expect.any(String),
- 'vercel.ai.response.timestamp': expect.any(String),
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String),
- [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
- '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]',
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'],
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
- }),
- description: 'generate_content mock-model-id',
- op: 'gen_ai.generate_content',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // Fifth span - tool call generateText span (should include prompts when sendDefaultPii: true)
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText',
- 'vercel.ai.pipeline.name': 'generateText',
- 'vercel.ai.prompt': '[{"role":"user","content":"What is the weather in San Francisco?"}]',
- 'vercel.ai.request.headers.user-agent': expect.any(String),
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in San Francisco?"}]',
- [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
- '[{"role":"assistant","parts":[{"type":"tool_call","id":"call-1","name":"getWeather","arguments":"{\\"location\\":\\"San Francisco\\"}"}],"finish_reason":"tool_call"}]',
- 'vercel.ai.response.finishReason': 'tool-calls',
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.streaming': false,
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- }),
- description: 'invoke_agent',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // Sixth span - tool call doGenerate span (should include prompts when sendDefaultPii: true)
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText.doGenerate',
- 'vercel.ai.pipeline.name': 'generateText.doGenerate',
- 'vercel.ai.request.headers.user-agent': expect.any(String),
- [GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String),
- [GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
- '[{"role":"assistant","parts":[{"type":"tool_call","id":"call-1","name":"getWeather","arguments":"{\\"location\\":\\"San Francisco\\"}"}],"finish_reason":"tool_call"}]',
- 'vercel.ai.prompt.toolChoice': expect.any(String),
- [GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: EXPECTED_AVAILABLE_TOOLS_JSON,
- 'vercel.ai.response.finishReason': 'tool-calls',
- 'vercel.ai.response.id': expect.any(String),
- 'vercel.ai.response.model': 'mock-model-id',
- 'vercel.ai.response.timestamp': expect.any(String),
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.streaming': false,
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'],
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- }),
- description: 'generate_content mock-model-id',
- op: 'gen_ai.generate_content',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // Seventh span - tool call execution span
- expect.objectContaining({
- data: expect.objectContaining({
- 'vercel.ai.operationId': 'ai.toolCall',
- [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1',
- [GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE]: 'Get the current weather for a location',
- [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather',
- [GEN_AI_TOOL_INPUT_ATTRIBUTE]: expect.any(String),
- [GEN_AI_TOOL_OUTPUT_ATTRIBUTE]: expect.any(String),
- [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function',
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- }),
- description: 'execute_tool getWeather',
- op: 'gen_ai.execute_tool',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- ]),
- };
-
createEsmAndCjsTests(
__dirname,
'scenario.mjs',
'instrument.mjs',
(createRunner, test) => {
test('creates ai related spans with sendDefaultPii: false', async () => {
- await createRunner().expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_FALSE }).start().completed();
+ await createRunner()
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(7);
+ const firstInvokeAgentSpan = container.items.find(
+ span =>
+ span.name === 'invoke_agent' &&
+ span.attributes['vercel.ai.operationId'].value === 'ai.generateText' &&
+ span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE] === undefined &&
+ span.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value === 10,
+ );
+ expect(firstInvokeAgentSpan).toBeDefined();
+ expect(firstInvokeAgentSpan!.name).toBe('invoke_agent');
+ expect(firstInvokeAgentSpan!.status).toBe('ok');
+ expect(firstInvokeAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+ expect(firstInvokeAgentSpan!.attributes['vercel.ai.operationId'].value).toBe('ai.generateText');
+ expect(firstInvokeAgentSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('mock-model-id');
+ expect(firstInvokeAgentSpan!.attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE].value).toBe('mock-model-id');
+ expect(firstInvokeAgentSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(10);
+ expect(firstInvokeAgentSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(20);
+ expect(firstInvokeAgentSpan!.attributes[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE].value).toBe(30);
+ expect(firstInvokeAgentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toBeUndefined();
+
+ const firstGenerateContentSpan = container.items.find(
+ span =>
+ span.name === 'generate_content mock-model-id' &&
+ span.attributes['vercel.ai.operationId'].value === 'ai.generateText.doGenerate' &&
+ span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE] === undefined,
+ );
+ expect(firstGenerateContentSpan).toBeDefined();
+ expect(firstGenerateContentSpan!.name).toBe('generate_content mock-model-id');
+ expect(firstGenerateContentSpan!.status).toBe('ok');
+ expect(firstGenerateContentSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
+ expect(firstGenerateContentSpan!.attributes['vercel.ai.operationId'].value).toBe(
+ 'ai.generateText.doGenerate',
+ );
+ expect(firstGenerateContentSpan!.attributes[GEN_AI_SYSTEM_ATTRIBUTE].value).toBe('mock-provider');
+ expect(firstGenerateContentSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(10);
+ expect(firstGenerateContentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toBeUndefined();
+
+ const secondInvokeAgentSpan = container.items.find(
+ span =>
+ span.name === 'invoke_agent' &&
+ span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]?.value ===
+ '[{"role":"user","content":"Where is the second span?"}]',
+ );
+ expect(secondInvokeAgentSpan).toBeDefined();
+ expect(secondInvokeAgentSpan!.name).toBe('invoke_agent');
+ expect(secondInvokeAgentSpan!.status).toBe('ok');
+ expect(secondInvokeAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+ expect(secondInvokeAgentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE].value).toBe(
+ '[{"role":"user","content":"Where is the second span?"}]',
+ );
+ expect(secondInvokeAgentSpan!.attributes[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE].value).toBe(
+ '[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]',
+ );
+
+ const secondGenerateContentSpan = container.items.find(
+ span =>
+ span.name === 'generate_content mock-model-id' &&
+ span.attributes[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]?.value?.includes('Second span here!'),
+ );
+ expect(secondGenerateContentSpan).toBeDefined();
+ expect(secondGenerateContentSpan!.name).toBe('generate_content mock-model-id');
+ expect(secondGenerateContentSpan!.status).toBe('ok');
+ expect(secondGenerateContentSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
+
+ const toolInvokeAgentSpan = container.items.find(
+ span =>
+ span.name === 'invoke_agent' && span.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]?.value === 15,
+ );
+ expect(toolInvokeAgentSpan).toBeDefined();
+ expect(toolInvokeAgentSpan!.name).toBe('invoke_agent');
+ expect(toolInvokeAgentSpan!.status).toBe('ok');
+
+ const toolGenerateContentSpan = container.items.find(
+ span =>
+ span.name === 'generate_content mock-model-id' &&
+ span.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]?.value === 15,
+ );
+ expect(toolGenerateContentSpan).toBeDefined();
+ expect(toolGenerateContentSpan!.name).toBe('generate_content mock-model-id');
+ expect(toolGenerateContentSpan!.status).toBe('ok');
+
+ const toolExecutionSpan = container.items.find(span => span.name === 'execute_tool getWeather');
+ expect(toolExecutionSpan).toBeDefined();
+ expect(toolExecutionSpan!.name).toBe('execute_tool getWeather');
+ expect(toolExecutionSpan!.status).toBe('ok');
+ expect(toolExecutionSpan!.attributes['sentry.op'].value).toBe('gen_ai.execute_tool');
+ expect(toolExecutionSpan!.attributes[GEN_AI_TOOL_NAME_ATTRIBUTE].value).toBe('getWeather');
+ expect(toolExecutionSpan!.attributes[GEN_AI_TOOL_CALL_ID_ATTRIBUTE].value).toBe('call-1');
+ expect(toolExecutionSpan!.attributes[GEN_AI_TOOL_TYPE_ATTRIBUTE].value).toBe('function');
+ },
+ })
+ .start()
+ .completed();
});
},
{
@@ -456,7 +143,107 @@ describe('Vercel AI integration (V6)', () => {
'instrument-with-pii.mjs',
(createRunner, test) => {
test('creates ai related spans with sendDefaultPii: true', async () => {
- await createRunner().expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_TRUE }).start().completed();
+ await createRunner()
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(7);
+ const firstInvokeAgentSpan = container.items.find(
+ span =>
+ span.name === 'invoke_agent' &&
+ span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]?.value ===
+ '[{"role":"user","content":"Where is the first span?"}]',
+ );
+ expect(firstInvokeAgentSpan).toBeDefined();
+ expect(firstInvokeAgentSpan!.name).toBe('invoke_agent');
+ expect(firstInvokeAgentSpan!.status).toBe('ok');
+ expect(firstInvokeAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+ expect(firstInvokeAgentSpan!.attributes['vercel.ai.operationId'].value).toBe('ai.generateText');
+ expect(firstInvokeAgentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE].value).toBe(
+ '[{"role":"user","content":"Where is the first span?"}]',
+ );
+ expect(firstInvokeAgentSpan!.attributes[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE].value).toBe(
+ '[{"role":"assistant","parts":[{"type":"text","content":"First span here!"}],"finish_reason":"stop"}]',
+ );
+
+ const firstGenerateContentSpan = container.items.find(
+ span =>
+ span.name === 'generate_content mock-model-id' &&
+ span.attributes[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]?.value?.includes('First span here!'),
+ );
+ expect(firstGenerateContentSpan).toBeDefined();
+ expect(firstGenerateContentSpan!.name).toBe('generate_content mock-model-id');
+ expect(firstGenerateContentSpan!.status).toBe('ok');
+ expect(firstGenerateContentSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
+ expect(firstGenerateContentSpan!.attributes['vercel.ai.operationId'].value).toBe(
+ 'ai.generateText.doGenerate',
+ );
+ expect(firstGenerateContentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]).toBeDefined();
+ expect(firstGenerateContentSpan!.attributes[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE].value).toContain(
+ 'First span here!',
+ );
+
+ const secondInvokeAgentSpan = container.items.find(
+ span =>
+ span.name === 'invoke_agent' &&
+ span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]?.value ===
+ '[{"role":"user","content":"Where is the second span?"}]',
+ );
+ expect(secondInvokeAgentSpan).toBeDefined();
+ expect(secondInvokeAgentSpan!.name).toBe('invoke_agent');
+ expect(secondInvokeAgentSpan!.status).toBe('ok');
+ expect(secondInvokeAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+
+ const secondGenerateContentSpan = container.items.find(
+ span =>
+ span.name === 'generate_content mock-model-id' &&
+ span.attributes[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]?.value?.includes('Second span here!'),
+ );
+ expect(secondGenerateContentSpan).toBeDefined();
+ expect(secondGenerateContentSpan!.name).toBe('generate_content mock-model-id');
+ expect(secondGenerateContentSpan!.status).toBe('ok');
+ expect(secondGenerateContentSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
+
+ const toolInvokeAgentSpan = container.items.find(
+ span =>
+ span.name === 'invoke_agent' &&
+ span.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]?.value ===
+ '[{"role":"user","content":"What is the weather in San Francisco?"}]',
+ );
+ expect(toolInvokeAgentSpan).toBeDefined();
+ expect(toolInvokeAgentSpan!.name).toBe('invoke_agent');
+ expect(toolInvokeAgentSpan!.status).toBe('ok');
+ expect(toolInvokeAgentSpan!.attributes[GEN_AI_INPUT_MESSAGES_ATTRIBUTE].value).toBe(
+ '[{"role":"user","content":"What is the weather in San Francisco?"}]',
+ );
+
+ const toolGenerateContentSpan = container.items.find(
+ span =>
+ span.name === 'generate_content mock-model-id' &&
+ span.attributes[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE] !== undefined,
+ );
+ expect(toolGenerateContentSpan).toBeDefined();
+ expect(toolGenerateContentSpan!.name).toBe('generate_content mock-model-id');
+ expect(toolGenerateContentSpan!.status).toBe('ok');
+ expect(toolGenerateContentSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
+ expect(toolGenerateContentSpan!.attributes[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]).toBeDefined();
+ expect(toolGenerateContentSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(15);
+
+ const toolExecutionSpan = container.items.find(span => span.name === 'execute_tool getWeather');
+ expect(toolExecutionSpan).toBeDefined();
+ expect(toolExecutionSpan!.name).toBe('execute_tool getWeather');
+ expect(toolExecutionSpan!.status).toBe('ok');
+ expect(toolExecutionSpan!.attributes['sentry.op'].value).toBe('gen_ai.execute_tool');
+ expect(toolExecutionSpan!.attributes[GEN_AI_TOOL_NAME_ATTRIBUTE].value).toBe('getWeather');
+ expect(toolExecutionSpan!.attributes[GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE].value).toBe(
+ 'Get the current weather for a location',
+ );
+ expect(toolExecutionSpan!.attributes[GEN_AI_TOOL_INPUT_ATTRIBUTE]).toBeDefined();
+ expect(toolExecutionSpan!.attributes[GEN_AI_TOOL_OUTPUT_ATTRIBUTE]).toBeDefined();
+ },
+ })
+ .start()
+ .completed();
});
},
{
@@ -472,86 +259,6 @@ describe('Vercel AI integration (V6)', () => {
'instrument.mjs',
(createRunner, test) => {
test('captures error in tool', async () => {
- const expectedTransaction = {
- transaction: 'main',
- spans: expect.arrayContaining([
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText',
- 'vercel.ai.pipeline.name': 'generateText',
- 'vercel.ai.request.headers.user-agent': expect.any(String),
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.streaming': false,
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- 'vercel.ai.response.finishReason': 'tool-calls',
- }),
- description: 'invoke_agent',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.vercelai.otel',
- }),
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- 'vercel.ai.model.provider': 'mock-provider',
- 'vercel.ai.operationId': 'ai.generateText.doGenerate',
- 'vercel.ai.pipeline.name': 'generateText.doGenerate',
- 'vercel.ai.request.headers.user-agent': expect.any(String),
- 'vercel.ai.response.finishReason': 'tool-calls',
- 'vercel.ai.response.id': expect.any(String),
- 'vercel.ai.response.model': 'mock-model-id',
- 'vercel.ai.response.timestamp': expect.any(String),
- 'vercel.ai.settings.maxRetries': 2,
- 'vercel.ai.streaming': false,
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'],
- [GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
- [GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
- [GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
- [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- }),
- description: 'generate_content mock-model-id',
- op: 'gen_ai.generate_content',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- expect.objectContaining({
- data: expect.objectContaining({
- 'vercel.ai.operationId': 'ai.toolCall',
- [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1',
- [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather',
- [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function',
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- }),
- description: 'execute_tool getWeather',
- op: 'gen_ai.execute_tool',
- origin: 'auto.vercelai.otel',
- status: 'internal_error',
- }),
- ]),
- };
-
- const expectedError = {
- level: 'error',
- tags: expect.objectContaining({
- 'vercel.ai.tool.name': 'getWeather',
- 'vercel.ai.tool.callId': 'call-1',
- }),
- };
-
let transactionEvent: Event | undefined;
let errorEvent: Event | undefined;
@@ -561,6 +268,28 @@ describe('Vercel AI integration (V6)', () => {
transactionEvent = transaction;
},
})
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(3);
+ const invokeAgentSpan = container.items.find(span => span.name === 'invoke_agent');
+ expect(invokeAgentSpan).toBeDefined();
+ expect(invokeAgentSpan!.name).toBe('invoke_agent');
+ expect(invokeAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+
+ const generateContentSpan = container.items.find(span => span.name === 'generate_content mock-model-id');
+ expect(generateContentSpan).toBeDefined();
+ expect(generateContentSpan!.name).toBe('generate_content mock-model-id');
+ expect(generateContentSpan!.status).toBe('ok');
+ expect(generateContentSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
+
+ const toolSpan = container.items.find(span => span.name === 'execute_tool getWeather');
+ expect(toolSpan).toBeDefined();
+ expect(toolSpan!.name).toBe('execute_tool getWeather');
+ expect(toolSpan!.status).toBe('error');
+ expect(toolSpan!.attributes['sentry.op'].value).toBe('gen_ai.execute_tool');
+ expect(toolSpan!.attributes[GEN_AI_TOOL_NAME_ATTRIBUTE].value).toBe('getWeather');
+ },
+ })
.expect({
event: event => {
errorEvent = event;
@@ -570,10 +299,16 @@ describe('Vercel AI integration (V6)', () => {
.completed();
expect(transactionEvent).toBeDefined();
- expect(transactionEvent).toMatchObject(expectedTransaction);
+ expect(transactionEvent!.transaction).toBe('main');
expect(errorEvent).toBeDefined();
- expect(errorEvent).toMatchObject(expectedError);
+ expect(errorEvent!.level).toBe('error');
+ expect(errorEvent!.tags).toEqual(
+ expect.objectContaining({
+ 'vercel.ai.tool.name': 'getWeather',
+ 'vercel.ai.tool.callId': 'call-1',
+ }),
+ );
// Trace id should be the same for the transaction and error event
expect(transactionEvent!.contexts!.trace!.trace_id).toBe(errorEvent!.contexts!.trace!.trace_id);
@@ -592,7 +327,29 @@ describe('Vercel AI integration (V6)', () => {
'instrument.mjs',
(createRunner, test) => {
test('creates ai related spans with v6', async () => {
- await createRunner().expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_FALSE }).start().completed();
+ await createRunner()
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(7);
+ const invokeAgentSpans = container.items.filter(
+ span => span.attributes['sentry.op'].value === 'gen_ai.invoke_agent',
+ );
+ expect(invokeAgentSpans).toHaveLength(3);
+
+ const generateContentSpans = container.items.filter(
+ span => span.attributes['sentry.op'].value === 'gen_ai.generate_content',
+ );
+ expect(generateContentSpans).toHaveLength(3);
+
+ const toolSpan = container.items.find(
+ span => span.attributes['sentry.op'].value === 'gen_ai.execute_tool',
+ );
+ expect(toolSpan).toBeDefined();
+ },
+ })
+ .start()
+ .completed();
});
},
{
@@ -608,71 +365,50 @@ describe('Vercel AI integration (V6)', () => {
'instrument.mjs',
(createRunner, test) => {
test('creates spans for ToolLoopAgent with tool calls', async () => {
- const expectedTransaction = {
- transaction: 'main',
- spans: expect.arrayContaining([
- // ToolLoopAgent outer span
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.invoke_agent',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- [GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
- }),
- description: 'invoke_agent weather_agent',
- op: 'gen_ai.invoke_agent',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // First doGenerate span (returns tool-calls)
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'],
- }),
- description: 'generate_content mock-model-id',
- op: 'gen_ai.generate_content',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // Tool execution span
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1',
- [GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather',
- [GEN_AI_TOOL_TYPE_ATTRIBUTE]: 'function',
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'execute_tool',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.execute_tool',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- }),
- description: 'execute_tool getWeather',
- op: 'gen_ai.execute_tool',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- // Second doGenerate span (returns final text)
- expect.objectContaining({
- data: expect.objectContaining({
- [GEN_AI_OPERATION_NAME_ATTRIBUTE]: 'generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_OP]: 'gen_ai.generate_content',
- [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.vercelai.otel',
- [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
- [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
- [GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'],
- }),
- description: 'generate_content mock-model-id',
- op: 'gen_ai.generate_content',
- origin: 'auto.vercelai.otel',
- status: 'ok',
- }),
- ]),
- };
+ await createRunner()
+ .expect({ transaction: { transaction: 'main' } })
+ .expect({
+ span: container => {
+ expect(container.items).toHaveLength(4);
+ const invokeAgentSpan = container.items.find(span => span.name === 'invoke_agent weather_agent');
+ expect(invokeAgentSpan).toBeDefined();
+ expect(invokeAgentSpan!.name).toBe('invoke_agent weather_agent');
+ expect(invokeAgentSpan!.status).toBe('ok');
+ expect(invokeAgentSpan!.attributes['sentry.op'].value).toBe('gen_ai.invoke_agent');
+ expect(invokeAgentSpan!.attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE].value).toBe('mock-model-id');
+
+ const toolCallsGenerateContentSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]?.value === '["tool-calls"]',
+ );
+ expect(toolCallsGenerateContentSpan).toBeDefined();
+ expect(toolCallsGenerateContentSpan!.name).toBe('generate_content mock-model-id');
+ expect(toolCallsGenerateContentSpan!.status).toBe('ok');
+ expect(toolCallsGenerateContentSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
+ expect(toolCallsGenerateContentSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(10);
+ expect(toolCallsGenerateContentSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(20);
- await createRunner().expect({ transaction: expectedTransaction }).start().completed();
+ const toolSpan = container.items.find(span => span.name === 'execute_tool getWeather');
+ expect(toolSpan).toBeDefined();
+ expect(toolSpan!.name).toBe('execute_tool getWeather');
+ expect(toolSpan!.status).toBe('ok');
+ expect(toolSpan!.attributes['sentry.op'].value).toBe('gen_ai.execute_tool');
+ expect(toolSpan!.attributes[GEN_AI_TOOL_NAME_ATTRIBUTE].value).toBe('getWeather');
+ expect(toolSpan!.attributes[GEN_AI_TOOL_CALL_ID_ATTRIBUTE].value).toBe('call-1');
+ expect(toolSpan!.attributes[GEN_AI_TOOL_TYPE_ATTRIBUTE].value).toBe('function');
+
+ const finalGenerateContentSpan = container.items.find(
+ span => span.attributes[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]?.value === '["stop"]',
+ );
+ expect(finalGenerateContentSpan).toBeDefined();
+ expect(finalGenerateContentSpan!.name).toBe('generate_content mock-model-id');
+ expect(finalGenerateContentSpan!.status).toBe('ok');
+ expect(finalGenerateContentSpan!.attributes['sentry.op'].value).toBe('gen_ai.generate_content');
+ expect(finalGenerateContentSpan!.attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE].value).toBe(15);
+ expect(finalGenerateContentSpan!.attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE].value).toBe(25);
+ },
+ })
+ .start()
+ .completed();
});
},
{
diff --git a/dev-packages/node-integration-tests/utils/runner.ts b/dev-packages/node-integration-tests/utils/runner.ts
index 89f96974c123..0aebd074008d 100644
--- a/dev-packages/node-integration-tests/utils/runner.ts
+++ b/dev-packages/node-integration-tests/utils/runner.ts
@@ -602,7 +602,8 @@ export function createRunner(...paths: string[]) {
if (process.env.DEBUG) log('stderr line', output);
- if (ensureNoErrorOutput) {
+ // Ignore deprecation warnings for this purpose
+ if (ensureNoErrorOutput && !`${output}`.includes('DeprecationWarning:')) {
complete(new Error(`Expected no error output but got: '${output}'`));
}
});
diff --git a/dev-packages/rollup-utils/npmHelpers.mjs b/dev-packages/rollup-utils/npmHelpers.mjs
index 6f399c1c3f59..e361b00ef2b8 100644
--- a/dev-packages/rollup-utils/npmHelpers.mjs
+++ b/dev-packages/rollup-utils/npmHelpers.mjs
@@ -21,7 +21,7 @@ import {
makeSucrasePlugin,
} from './plugins/index.mjs';
import { makePackageNodeEsm } from './plugins/make-esm-plugin.mjs';
-import { mergePlugins } from './utils.mjs';
+import { mergeExternals, mergePlugins } from './utils.mjs';
const __dirname = path.dirname(fileURLToPath(import.meta.url));
@@ -47,6 +47,13 @@ export function makeBaseNPMConfig(options = {}) {
excludeIframe: undefined,
});
+ const deps = [
+ ...builtinModules.filter(m => !bundledBuiltins.includes(m)),
+ ...Object.keys(packageDotJSON.dependencies || {}),
+ ...Object.keys(packageDotJSON.peerDependencies || {}),
+ ...Object.keys(packageDotJSON.optionalDependencies || {}),
+ ];
+
const defaultBaseConfig = {
input: entrypoints,
@@ -100,17 +107,20 @@ export function makeBaseNPMConfig(options = {}) {
plugins: [nodeResolvePlugin, sucrasePlugin, debugBuildStatementReplacePlugin, rrwebBuildPlugin, cleanupPlugin],
// don't include imported modules from outside the package in the final output
- external: [
- ...builtinModules.filter(m => !bundledBuiltins.includes(m)),
- ...Object.keys(packageDotJSON.dependencies || {}),
- ...Object.keys(packageDotJSON.peerDependencies || {}),
- ...Object.keys(packageDotJSON.optionalDependencies || {}),
- ],
+ // also treat subpath exports (e.g. `@sentry/core/browser`) as external
+ external: id => {
+ // treat subpath exports as external if the package is external
+ return deps.some(dep => id === dep || id.startsWith(`${dep}/`));
+ },
};
return deepMerge(defaultBaseConfig, packageSpecificConfig, {
// Plugins have to be in the correct order or everything breaks, so when merging we have to manually re-order them
- customMerge: key => (key === 'plugins' ? mergePlugins : undefined),
+ customMerge: key => {
+ if (key === 'plugins') return mergePlugins;
+ if (key === 'external') return mergeExternals;
+ return undefined;
+ },
});
}
diff --git a/dev-packages/rollup-utils/utils.mjs b/dev-packages/rollup-utils/utils.mjs
index b687ff9993c4..dd64e1e38c10 100644
--- a/dev-packages/rollup-utils/utils.mjs
+++ b/dev-packages/rollup-utils/utils.mjs
@@ -10,6 +10,25 @@ export const insertAt = (arr, index, ...insertees) => {
return newArr;
};
+/**
+ * Turn a list of module IDs into a test function
+ * Includes submodule exports by checking that it starts with the name
+ * plus a / character. The list would contain something like `'@sentry/core'`
+ * and we might test it against a module id like `'@sentry/core/browser'`
+ */
+const toFilterFunction = list => (Array.isArray(list) ? id => list.some(test => filterTest(test, id)) : list);
+
+const filterTest = (test, id) => (test instanceof RegExp ? test.test(id) : id === test || id.startsWith(`${test}/`));
+
+/**
+ * Merge two external configs (function or array), returning a function that handles both.
+ */
+export function mergeExternals(base, specific) {
+ const baseFn = toFilterFunction(base);
+ const specificFn = toFilterFunction(specific);
+ return id => baseFn(id) || specificFn(id);
+}
+
/**
* Merge two arrays of plugins, making sure they're sorted in the correct order.
*/
diff --git a/package.json b/package.json
index e71f94772bd0..bca64a9f863f 100644
--- a/package.json
+++ b/package.json
@@ -139,7 +139,7 @@
"oxlint": "^1.53.0",
"oxlint-tsgolint": "^0.16.0",
"rimraf": "^5.0.10",
- "rollup": "^4.59.0",
+ "rollup": "^4.60.3",
"rollup-plugin-cleanup": "^3.2.1",
"rollup-plugin-license": "^3.7.1",
"size-limit": "~12.1.0",
diff --git a/packages/astro/package.json b/packages/astro/package.json
index 0a1ccf12c590..5daba98966f1 100644
--- a/packages/astro/package.json
+++ b/packages/astro/package.json
@@ -59,7 +59,7 @@
"@sentry/browser": "10.52.0",
"@sentry/core": "10.52.0",
"@sentry/node": "10.52.0",
- "@sentry/vite-plugin": "^5.2.0"
+ "@sentry/vite-plugin": "^5.3.0"
},
"devDependencies": {
"astro": "^3.5.0",
diff --git a/packages/astro/src/integration/index.ts b/packages/astro/src/integration/index.ts
index 5c5ca2710af6..a1b4df28b003 100644
--- a/packages/astro/src/integration/index.ts
+++ b/packages/astro/src/integration/index.ts
@@ -35,6 +35,7 @@ export const sentryAstro = (options: SentryOptions = {}): AstroIntegration => {
// todo(v11): Extract `release` build time option here - cannot be done currently, because it conflicts with the `DeprecatedRuntimeOptions` type
// release,
bundleSizeOptimizations,
+ applicationKey,
unstable_sentryVitePluginOptions,
debug,
org,
@@ -109,6 +110,7 @@ export const sentryAstro = (options: SentryOptions = {}): AstroIntegration => {
},
plugins: [
sentryVitePlugin({
+ applicationKey,
// Priority: top-level options > deprecated options > env vars
// eslint-disable-next-line deprecation/deprecation
org: org ?? uploadOptions.org ?? env.SENTRY_ORG,
diff --git a/packages/astro/test/integration/index.test.ts b/packages/astro/test/integration/index.test.ts
index 15b04ac041bc..a7b4d68f16c3 100644
--- a/packages/astro/test/integration/index.test.ts
+++ b/packages/astro/test/integration/index.test.ts
@@ -269,6 +269,21 @@ describe('sentryAstro integration', () => {
);
});
+ it('passes top-level applicationKey to the vite plugin', async () => {
+ const integration = sentryAstro({
+ applicationKey: 'my-app-key',
+ sourceMapsUploadOptions: { enabled: true, org: 'my-org', project: 'my-project' },
+ });
+ // @ts-expect-error - the hook exists and we only need to pass what we actually use
+ await integration.hooks['astro:config:setup']({ ...baseConfigHookObject, updateConfig, injectScript, config });
+
+ expect(sentryVitePluginSpy).toHaveBeenCalledWith(
+ expect.objectContaining({
+ applicationKey: 'my-app-key',
+ }),
+ );
+ });
+
it("doesn't enable source maps if `sourceMapsUploadOptions.enabled` is `false`", async () => {
const integration = sentryAstro({
sourceMapsUploadOptions: { enabled: false },
diff --git a/packages/astro/tsconfig.json b/packages/astro/tsconfig.json
index bf45a09f2d71..d1a9c722679f 100644
--- a/packages/astro/tsconfig.json
+++ b/packages/astro/tsconfig.json
@@ -4,6 +4,7 @@
"include": ["src/**/*"],
"compilerOptions": {
+ "moduleResolution": "bundler"
// package-specific options
}
}
diff --git a/packages/browser/src/client.ts b/packages/browser/src/client.ts
index 4ffc85b07762..8d0242d82cbf 100644
--- a/packages/browser/src/client.ts
+++ b/packages/browser/src/client.ts
@@ -8,7 +8,7 @@ import type {
ParameterizedString,
Scope,
SeverityLevel,
-} from '@sentry/core';
+} from '@sentry/core/browser';
import {
_INTERNAL_flushLogsBuffer,
_INTERNAL_flushMetricsBuffer,
@@ -16,7 +16,7 @@ import {
applySdkMetadata,
Client,
getSDKSource,
-} from '@sentry/core';
+} from '@sentry/core/browser';
import { eventFromException, eventFromMessage } from './eventbuilder';
import { WINDOW } from './helpers';
import type { BrowserTransportOptions } from './transports/types';
diff --git a/packages/browser/src/diagnose-sdk.ts b/packages/browser/src/diagnose-sdk.ts
index 0a5fdd0da05b..0be386ce6d32 100644
--- a/packages/browser/src/diagnose-sdk.ts
+++ b/packages/browser/src/diagnose-sdk.ts
@@ -1,4 +1,4 @@
-import { getClient, suppressTracing } from '@sentry/core';
+import { getClient, suppressTracing } from '@sentry/core/browser';
/**
* A function to diagnose why the SDK might not be successfully sending data.
diff --git a/packages/browser/src/eventbuilder.ts b/packages/browser/src/eventbuilder.ts
index b430007b552d..76e38ec00468 100644
--- a/packages/browser/src/eventbuilder.ts
+++ b/packages/browser/src/eventbuilder.ts
@@ -6,7 +6,7 @@ import type {
SeverityLevel,
StackFrame,
StackParser,
-} from '@sentry/core';
+} from '@sentry/core/browser';
import {
_INTERNAL_enhanceErrorWithSentryInfo,
addExceptionMechanism,
@@ -22,7 +22,7 @@ import {
isPlainObject,
normalizeToSize,
resolvedSyncPromise,
-} from '@sentry/core';
+} from '@sentry/core/browser';
type Prototype = { constructor: (...args: unknown[]) => unknown };
diff --git a/packages/browser/src/exports.ts b/packages/browser/src/exports.ts
index 322cc0b3c8db..0ede8e8d76be 100644
--- a/packages/browser/src/exports.ts
+++ b/packages/browser/src/exports.ts
@@ -20,7 +20,7 @@ export type {
ExclusiveEventHintOrCaptureContext,
Log,
LogSeverityLevel,
-} from '@sentry/core';
+} from '@sentry/core/browser';
export type { BrowserOptions } from './client';
@@ -72,14 +72,14 @@ export {
updateSpanName,
withStreamedSpan,
metrics,
-} from '@sentry/core';
+} from '@sentry/core/browser';
export {
SEMANTIC_ATTRIBUTE_SENTRY_OP,
SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN,
SEMANTIC_ATTRIBUTE_SENTRY_SOURCE,
SEMANTIC_ATTRIBUTE_SENTRY_SAMPLE_RATE,
-} from '@sentry/core';
+} from '@sentry/core/browser';
export { WINDOW } from './helpers';
export { BrowserClient } from './client';
diff --git a/packages/browser/src/feedbackAsync.ts b/packages/browser/src/feedbackAsync.ts
index c40964bb5c27..30f31bec01d5 100644
--- a/packages/browser/src/feedbackAsync.ts
+++ b/packages/browser/src/feedbackAsync.ts
@@ -5,6 +5,6 @@ import { lazyLoadIntegration } from './utils/lazyLoadIntegration';
* An integration to add user feedback to your application,
* while loading most of the code lazily only when it's needed.
*/
-export const feedbackAsyncIntegration = buildFeedbackIntegration({
+export const feedbackAsyncIntegration: ReturnType<typeof buildFeedbackIntegration> = buildFeedbackIntegration({
lazyLoadIntegration,
});
diff --git a/packages/browser/src/feedbackSync.ts b/packages/browser/src/feedbackSync.ts
index ede41fefb221..879d3a7a453c 100644
--- a/packages/browser/src/feedbackSync.ts
+++ b/packages/browser/src/feedbackSync.ts
@@ -5,7 +5,7 @@ import {
} from '@sentry-internal/feedback';
/** Add a widget to capture user feedback to your application. */
-export const feedbackSyncIntegration = buildFeedbackIntegration({
+export const feedbackSyncIntegration: ReturnType<typeof buildFeedbackIntegration> = buildFeedbackIntegration({
getModalIntegration: () => feedbackModalIntegration,
getScreenshotIntegration: () => feedbackScreenshotIntegration,
});
diff --git a/packages/browser/src/helpers.ts b/packages/browser/src/helpers.ts
index 93c87e1d6161..09359b122450 100644
--- a/packages/browser/src/helpers.ts
+++ b/packages/browser/src/helpers.ts
@@ -1,4 +1,4 @@
-import type { Mechanism, WrappedFunction } from '@sentry/core';
+import type { Mechanism, WrappedFunction } from '@sentry/core/browser';
import {
addExceptionMechanism,
addExceptionTypeValue,
@@ -9,7 +9,7 @@ import {
GLOBAL_OBJ,
markFunctionWrapped,
withScope,
-} from '@sentry/core';
+} from '@sentry/core/browser';
export const WINDOW = GLOBAL_OBJ as typeof GLOBAL_OBJ & Window;
diff --git a/packages/browser/src/index.bundle.logs.metrics.ts b/packages/browser/src/index.bundle.logs.metrics.ts
index 415a56cf7cc6..5a101f8066f8 100644
--- a/packages/browser/src/index.bundle.logs.metrics.ts
+++ b/packages/browser/src/index.bundle.logs.metrics.ts
@@ -8,7 +8,7 @@ import {
export * from './index.bundle.base';
// TODO(v11): Export metrics here once we remove it from the base bundle.
-export { logger, consoleLoggingIntegration } from '@sentry/core';
+export { logger, consoleLoggingIntegration } from '@sentry/core/browser';
export { elementTimingIntegration } from '@sentry-internal/browser-utils';
diff --git a/packages/browser/src/index.bundle.replay.logs.metrics.ts b/packages/browser/src/index.bundle.replay.logs.metrics.ts
index 02938d0d7063..4a3003a491bb 100644
--- a/packages/browser/src/index.bundle.replay.logs.metrics.ts
+++ b/packages/browser/src/index.bundle.replay.logs.metrics.ts
@@ -7,7 +7,7 @@ import {
export * from './index.bundle.base';
// TODO(v11): Export metrics here once we remove it from the base bundle.
-export { logger, consoleLoggingIntegration } from '@sentry/core';
+export { logger, consoleLoggingIntegration } from '@sentry/core/browser';
export { replayIntegration, getReplay } from '@sentry-internal/replay';
diff --git a/packages/browser/src/index.bundle.tracing.logs.metrics.ts b/packages/browser/src/index.bundle.tracing.logs.metrics.ts
index 19b8118a5c04..eec04b9ee335 100644
--- a/packages/browser/src/index.bundle.tracing.logs.metrics.ts
+++ b/packages/browser/src/index.bundle.tracing.logs.metrics.ts
@@ -1,4 +1,4 @@
-import { registerSpanErrorInstrumentation } from '@sentry/core';
+import { registerSpanErrorInstrumentation } from '@sentry/core/browser';
import { feedbackIntegrationShim, replayIntegrationShim } from '@sentry-internal/integration-shims';
registerSpanErrorInstrumentation();
@@ -6,7 +6,7 @@ registerSpanErrorInstrumentation();
export * from './index.bundle.base';
// TODO(v11): Export metrics here once we remove it from the base bundle.
-export { logger, consoleLoggingIntegration } from '@sentry/core';
+export { logger, consoleLoggingIntegration } from '@sentry/core/browser';
export {
getActiveSpan,
@@ -18,7 +18,7 @@ export {
startSpan,
startSpanManual,
withActiveSpan,
-} from '@sentry/core';
+} from '@sentry/core/browser';
export {
browserTracingIntegration,
diff --git a/packages/browser/src/index.bundle.tracing.replay.feedback.logs.metrics.ts b/packages/browser/src/index.bundle.tracing.replay.feedback.logs.metrics.ts
index 5a531f6b33a9..6131da91aeb7 100644
--- a/packages/browser/src/index.bundle.tracing.replay.feedback.logs.metrics.ts
+++ b/packages/browser/src/index.bundle.tracing.replay.feedback.logs.metrics.ts
@@ -1,4 +1,4 @@
-import { registerSpanErrorInstrumentation } from '@sentry/core';
+import { registerSpanErrorInstrumentation } from '@sentry/core/browser';
import { feedbackAsyncIntegration } from './feedbackAsync';
registerSpanErrorInstrumentation();
@@ -18,7 +18,7 @@ export {
withActiveSpan,
logger,
consoleLoggingIntegration,
-} from '@sentry/core';
+} from '@sentry/core/browser';
export {
browserTracingIntegration,
diff --git a/packages/browser/src/index.bundle.tracing.replay.feedback.ts b/packages/browser/src/index.bundle.tracing.replay.feedback.ts
index 47b43d48f376..66d34bdd7831 100644
--- a/packages/browser/src/index.bundle.tracing.replay.feedback.ts
+++ b/packages/browser/src/index.bundle.tracing.replay.feedback.ts
@@ -1,4 +1,4 @@
-import { registerSpanErrorInstrumentation } from '@sentry/core';
+import { registerSpanErrorInstrumentation } from '@sentry/core/browser';
import {
consoleLoggingIntegrationShim,
elementTimingIntegrationShim,
@@ -23,7 +23,7 @@ export {
withActiveSpan,
getSpanDescendants,
setMeasurement,
-} from '@sentry/core';
+} from '@sentry/core/browser';
export {
browserTracingIntegration,
diff --git a/packages/browser/src/index.bundle.tracing.replay.logs.metrics.ts b/packages/browser/src/index.bundle.tracing.replay.logs.metrics.ts
index 45c7299bf436..470f8b2279c6 100644
--- a/packages/browser/src/index.bundle.tracing.replay.logs.metrics.ts
+++ b/packages/browser/src/index.bundle.tracing.replay.logs.metrics.ts
@@ -1,4 +1,4 @@
-import { registerSpanErrorInstrumentation } from '@sentry/core';
+import { registerSpanErrorInstrumentation } from '@sentry/core/browser';
import { feedbackIntegrationShim } from '@sentry-internal/integration-shims';
registerSpanErrorInstrumentation();
@@ -6,7 +6,7 @@ registerSpanErrorInstrumentation();
export * from './index.bundle.base';
// TODO(v11): Export metrics here once we remove it from the base bundle.
-export { logger, consoleLoggingIntegration } from '@sentry/core';
+export { logger, consoleLoggingIntegration } from '@sentry/core/browser';
export {
getActiveSpan,
@@ -18,7 +18,7 @@ export {
startSpan,
startSpanManual,
withActiveSpan,
-} from '@sentry/core';
+} from '@sentry/core/browser';
export {
browserTracingIntegration,
diff --git a/packages/browser/src/index.bundle.tracing.replay.ts b/packages/browser/src/index.bundle.tracing.replay.ts
index 63eb9a81c24a..5c56feada691 100644
--- a/packages/browser/src/index.bundle.tracing.replay.ts
+++ b/packages/browser/src/index.bundle.tracing.replay.ts
@@ -1,4 +1,4 @@
-import { registerSpanErrorInstrumentation } from '@sentry/core';
+import { registerSpanErrorInstrumentation } from '@sentry/core/browser';
import {
consoleLoggingIntegrationShim,
elementTimingIntegrationShim,
@@ -23,7 +23,7 @@ export {
withActiveSpan,
getSpanDescendants,
setMeasurement,
-} from '@sentry/core';
+} from '@sentry/core/browser';
export {
browserTracingIntegration,
diff --git a/packages/browser/src/index.bundle.tracing.ts b/packages/browser/src/index.bundle.tracing.ts
index a385ad4b0792..44bcc0d2ef4a 100644
--- a/packages/browser/src/index.bundle.tracing.ts
+++ b/packages/browser/src/index.bundle.tracing.ts
@@ -1,4 +1,4 @@
-import { registerSpanErrorInstrumentation } from '@sentry/core';
+import { registerSpanErrorInstrumentation } from '@sentry/core/browser';
import {
consoleLoggingIntegrationShim,
elementTimingIntegrationShim,
@@ -24,7 +24,7 @@ export {
withActiveSpan,
getSpanDescendants,
setMeasurement,
-} from '@sentry/core';
+} from '@sentry/core/browser';
export {
browserTracingIntegration,
diff --git a/packages/browser/src/index.ts b/packages/browser/src/index.ts
index 4709e6167b3c..35b65553b7d7 100644
--- a/packages/browser/src/index.ts
+++ b/packages/browser/src/index.ts
@@ -15,7 +15,7 @@ export {
rewriteFramesIntegration,
consoleLoggingIntegration,
createConsolaReporter,
-} from '@sentry/core';
+} from '@sentry/core/browser';
export { replayIntegration, getReplay } from '@sentry-internal/replay';
export type {
@@ -76,8 +76,8 @@ export {
createLangChainCallbackHandler,
instrumentLangChainEmbeddings,
logger,
-} from '@sentry/core';
-export type { Span, FeatureFlagsIntegration } from '@sentry/core';
+} from '@sentry/core/browser';
+export type { Span, FeatureFlagsIntegration } from '@sentry/core/browser';
export { makeBrowserOfflineTransport } from './transports/offline';
export { browserProfilingIntegration } from './profiling/integration';
export { spotlightBrowserIntegration } from './integrations/spotlight';
diff --git a/packages/browser/src/integrations-bundle/index.captureconsole.ts b/packages/browser/src/integrations-bundle/index.captureconsole.ts
index a2187ae98798..3d25bf4edaf5 100644
--- a/packages/browser/src/integrations-bundle/index.captureconsole.ts
+++ b/packages/browser/src/integrations-bundle/index.captureconsole.ts
@@ -1 +1 @@
-export { captureConsoleIntegration } from '@sentry/core';
+export { captureConsoleIntegration } from '@sentry/core/browser';
diff --git a/packages/browser/src/integrations-bundle/index.createlangchaincallbackhandler.ts b/packages/browser/src/integrations-bundle/index.createlangchaincallbackhandler.ts
index 8a64eda45579..a7bdee8b6693 100644
--- a/packages/browser/src/integrations-bundle/index.createlangchaincallbackhandler.ts
+++ b/packages/browser/src/integrations-bundle/index.createlangchaincallbackhandler.ts
@@ -1 +1 @@
-export { createLangChainCallbackHandler } from '@sentry/core';
+export { createLangChainCallbackHandler } from '@sentry/core/browser';
diff --git a/packages/browser/src/integrations-bundle/index.dedupe.ts b/packages/browser/src/integrations-bundle/index.dedupe.ts
index 776d967c31a9..19ba1ad738d4 100644
--- a/packages/browser/src/integrations-bundle/index.dedupe.ts
+++ b/packages/browser/src/integrations-bundle/index.dedupe.ts
@@ -1 +1 @@
-export { dedupeIntegration } from '@sentry/core';
+export { dedupeIntegration } from '@sentry/core/browser';
diff --git a/packages/browser/src/integrations-bundle/index.extraerrordata.ts b/packages/browser/src/integrations-bundle/index.extraerrordata.ts
index 4306f9694902..7df49cf87be6 100644
--- a/packages/browser/src/integrations-bundle/index.extraerrordata.ts
+++ b/packages/browser/src/integrations-bundle/index.extraerrordata.ts
@@ -1 +1 @@
-export { extraErrorDataIntegration } from '@sentry/core';
+export { extraErrorDataIntegration } from '@sentry/core/browser';
diff --git a/packages/browser/src/integrations-bundle/index.feedback.ts b/packages/browser/src/integrations-bundle/index.feedback.ts
index f5c10b970690..2518eb82e776 100644
--- a/packages/browser/src/integrations-bundle/index.feedback.ts
+++ b/packages/browser/src/integrations-bundle/index.feedback.ts
@@ -4,4 +4,4 @@ export { getFeedback } from '@sentry-internal/feedback';
export { feedbackAsyncIntegration, feedbackAsyncIntegration as feedbackIntegration };
-export { captureFeedback } from '@sentry/core';
+export { captureFeedback } from '@sentry/core/browser';
diff --git a/packages/browser/src/integrations-bundle/index.instrumentanthropicaiclient.ts b/packages/browser/src/integrations-bundle/index.instrumentanthropicaiclient.ts
index d82909a524d8..ab7b3157953a 100644
--- a/packages/browser/src/integrations-bundle/index.instrumentanthropicaiclient.ts
+++ b/packages/browser/src/integrations-bundle/index.instrumentanthropicaiclient.ts
@@ -1 +1 @@
-export { instrumentAnthropicAiClient } from '@sentry/core';
+export { instrumentAnthropicAiClient } from '@sentry/core/browser';
diff --git a/packages/browser/src/integrations-bundle/index.instrumentgooglegenaiclient.ts b/packages/browser/src/integrations-bundle/index.instrumentgooglegenaiclient.ts
index ec58139c0681..9e8316dc7e43 100644
--- a/packages/browser/src/integrations-bundle/index.instrumentgooglegenaiclient.ts
+++ b/packages/browser/src/integrations-bundle/index.instrumentgooglegenaiclient.ts
@@ -1 +1 @@
-export { instrumentGoogleGenAIClient } from '@sentry/core';
+export { instrumentGoogleGenAIClient } from '@sentry/core/browser';
diff --git a/packages/browser/src/integrations-bundle/index.instrumentlangchainembeddings.ts b/packages/browser/src/integrations-bundle/index.instrumentlangchainembeddings.ts
index 644b8a2ef570..b8b733fc9907 100644
--- a/packages/browser/src/integrations-bundle/index.instrumentlangchainembeddings.ts
+++ b/packages/browser/src/integrations-bundle/index.instrumentlangchainembeddings.ts
@@ -1 +1 @@
-export { instrumentLangChainEmbeddings } from '@sentry/core';
+export { instrumentLangChainEmbeddings } from '@sentry/core/browser';
diff --git a/packages/browser/src/integrations-bundle/index.instrumentlanggraph.ts b/packages/browser/src/integrations-bundle/index.instrumentlanggraph.ts
index c7a8c0e9e591..e54333eed24a 100644
--- a/packages/browser/src/integrations-bundle/index.instrumentlanggraph.ts
+++ b/packages/browser/src/integrations-bundle/index.instrumentlanggraph.ts
@@ -1 +1 @@
-export { instrumentLangGraph } from '@sentry/core';
+export { instrumentLangGraph } from '@sentry/core/browser';
diff --git a/packages/browser/src/integrations-bundle/index.instrumentopenaiclient.ts b/packages/browser/src/integrations-bundle/index.instrumentopenaiclient.ts
index 5371961ff03a..813ad7b2a9fb 100644
--- a/packages/browser/src/integrations-bundle/index.instrumentopenaiclient.ts
+++ b/packages/browser/src/integrations-bundle/index.instrumentopenaiclient.ts
@@ -1 +1 @@
-export { instrumentOpenAiClient } from '@sentry/core';
+export { instrumentOpenAiClient } from '@sentry/core/browser';
diff --git a/packages/browser/src/integrations-bundle/index.modulemetadata.ts b/packages/browser/src/integrations-bundle/index.modulemetadata.ts
index c4f4a2b9cf75..b63da0e5a49d 100644
--- a/packages/browser/src/integrations-bundle/index.modulemetadata.ts
+++ b/packages/browser/src/integrations-bundle/index.modulemetadata.ts
@@ -1 +1 @@
-export { moduleMetadataIntegration } from '@sentry/core';
+export { moduleMetadataIntegration } from '@sentry/core/browser';
diff --git a/packages/browser/src/integrations-bundle/index.rewriteframes.ts b/packages/browser/src/integrations-bundle/index.rewriteframes.ts
index 07ebaf6666f5..fbdc2d071d3e 100644
--- a/packages/browser/src/integrations-bundle/index.rewriteframes.ts
+++ b/packages/browser/src/integrations-bundle/index.rewriteframes.ts
@@ -1 +1 @@
-export { rewriteFramesIntegration } from '@sentry/core';
+export { rewriteFramesIntegration } from '@sentry/core/browser';
diff --git a/packages/browser/src/integrations/breadcrumbs.ts b/packages/browser/src/integrations/breadcrumbs.ts
index de99621bf52f..7378ffc7c377 100644
--- a/packages/browser/src/integrations/breadcrumbs.ts
+++ b/packages/browser/src/integrations/breadcrumbs.ts
@@ -14,7 +14,7 @@ import type {
IntegrationFn,
XhrBreadcrumbData,
XhrBreadcrumbHint,
-} from '@sentry/core';
+} from '@sentry/core/browser';
import {
addBreadcrumb,
addConsoleInstrumentationHandler,
@@ -29,7 +29,7 @@ import {
parseUrl,
safeJoin,
severityLevelFromString,
-} from '@sentry/core';
+} from '@sentry/core/browser';
import type { FetchHint, XhrHint } from '@sentry-internal/browser-utils';
import {
addClickKeypressInstrumentationHandler,
diff --git a/packages/browser/src/integrations/browserapierrors.ts b/packages/browser/src/integrations/browserapierrors.ts
index cd32435fa5b0..0688bcf1bb76 100644
--- a/packages/browser/src/integrations/browserapierrors.ts
+++ b/packages/browser/src/integrations/browserapierrors.ts
@@ -1,5 +1,5 @@
-import type { IntegrationFn, WrappedFunction } from '@sentry/core';
-import { defineIntegration, fill, getFunctionName, getOriginalFunction } from '@sentry/core';
+import type { IntegrationFn, WrappedFunction } from '@sentry/core/browser';
+import { defineIntegration, fill, getFunctionName, getOriginalFunction } from '@sentry/core/browser';
import { WINDOW, wrap } from '../helpers';
// Using a comma-separated string and split for smaller bundle size vs an array literal
diff --git a/packages/browser/src/integrations/browsersession.ts b/packages/browser/src/integrations/browsersession.ts
index 23fdb4087af9..7d339baa42d5 100644
--- a/packages/browser/src/integrations/browsersession.ts
+++ b/packages/browser/src/integrations/browsersession.ts
@@ -1,4 +1,4 @@
-import { captureSession, debug, defineIntegration, getIsolationScope, startSession } from '@sentry/core';
+import { captureSession, debug, defineIntegration, getIsolationScope, startSession } from '@sentry/core/browser';
import { addHistoryInstrumentationHandler } from '@sentry-internal/browser-utils';
import { DEBUG_BUILD } from '../debug-build';
import { WINDOW } from '../helpers';
diff --git a/packages/browser/src/integrations/contextlines.ts b/packages/browser/src/integrations/contextlines.ts
index 07dae0b9dc32..70da4bac7504 100644
--- a/packages/browser/src/integrations/contextlines.ts
+++ b/packages/browser/src/integrations/contextlines.ts
@@ -1,5 +1,5 @@
-import type { Event, IntegrationFn, StackFrame } from '@sentry/core';
-import { addContextToFrame, defineIntegration, GLOBAL_OBJ, stripUrlQueryAndFragment } from '@sentry/core';
+import type { Event, IntegrationFn, StackFrame } from '@sentry/core/browser';
+import { addContextToFrame, defineIntegration, GLOBAL_OBJ, stripUrlQueryAndFragment } from '@sentry/core/browser';
const WINDOW = GLOBAL_OBJ as typeof GLOBAL_OBJ & Window;
diff --git a/packages/browser/src/integrations/culturecontext.ts b/packages/browser/src/integrations/culturecontext.ts
index cb4a1c975937..4876d5a80dd0 100644
--- a/packages/browser/src/integrations/culturecontext.ts
+++ b/packages/browser/src/integrations/culturecontext.ts
@@ -1,5 +1,5 @@
-import type { CultureContext, IntegrationFn } from '@sentry/core';
-import { defineIntegration, safeSetSpanJSONAttributes } from '@sentry/core';
+import type { CultureContext, IntegrationFn } from '@sentry/core/browser';
+import { defineIntegration, safeSetSpanJSONAttributes } from '@sentry/core/browser';
import { WINDOW } from '../helpers';
const INTEGRATION_NAME = 'CultureContext';
diff --git a/packages/browser/src/integrations/featureFlags/growthbook/integration.ts b/packages/browser/src/integrations/featureFlags/growthbook/integration.ts
index 560918535cce..38ae92009063 100644
--- a/packages/browser/src/integrations/featureFlags/growthbook/integration.ts
+++ b/packages/browser/src/integrations/featureFlags/growthbook/integration.ts
@@ -1,5 +1,5 @@
-import type { IntegrationFn } from '@sentry/core';
-import { growthbookIntegration as coreGrowthbookIntegration } from '@sentry/core';
+import type { IntegrationFn } from '@sentry/core/browser';
+import { growthbookIntegration as coreGrowthbookIntegration } from '@sentry/core/browser';
import type { GrowthBookClass } from './types';
/**
diff --git a/packages/browser/src/integrations/featureFlags/launchdarkly/integration.ts b/packages/browser/src/integrations/featureFlags/launchdarkly/integration.ts
index 822e4b1d7f80..5283dbb7434a 100644
--- a/packages/browser/src/integrations/featureFlags/launchdarkly/integration.ts
+++ b/packages/browser/src/integrations/featureFlags/launchdarkly/integration.ts
@@ -1,10 +1,10 @@
-import type { Client, Event, EventHint, IntegrationFn } from '@sentry/core';
+import type { Client, Event, EventHint, IntegrationFn } from '@sentry/core/browser';
import {
_INTERNAL_addFeatureFlagToActiveSpan,
_INTERNAL_copyFlagsFromScopeToEvent,
_INTERNAL_insertFlagToScope,
defineIntegration,
-} from '@sentry/core';
+} from '@sentry/core/browser';
import type { LDContext, LDEvaluationDetail, LDInspectionFlagUsedHandler } from './types';
/**
diff --git a/packages/browser/src/integrations/featureFlags/openfeature/integration.ts b/packages/browser/src/integrations/featureFlags/openfeature/integration.ts
index 85aedbf779f9..fd4b01ad38eb 100644
--- a/packages/browser/src/integrations/featureFlags/openfeature/integration.ts
+++ b/packages/browser/src/integrations/featureFlags/openfeature/integration.ts
@@ -13,13 +13,13 @@
* OpenFeature.addHooks(new Sentry.OpenFeatureIntegrationHook());
* ```
*/
-import type { Client, Event, EventHint, IntegrationFn } from '@sentry/core';
+import type { Client, Event, EventHint, IntegrationFn } from '@sentry/core/browser';
import {
_INTERNAL_addFeatureFlagToActiveSpan,
_INTERNAL_copyFlagsFromScopeToEvent,
_INTERNAL_insertFlagToScope,
defineIntegration,
-} from '@sentry/core';
+} from '@sentry/core/browser';
import type { EvaluationDetails, HookContext, HookHints, JsonValue, OpenFeatureHook } from './types';
export const openFeatureIntegration = defineIntegration(() => {
diff --git a/packages/browser/src/integrations/featureFlags/statsig/integration.ts b/packages/browser/src/integrations/featureFlags/statsig/integration.ts
index 9aef234045b5..94df3857b79e 100644
--- a/packages/browser/src/integrations/featureFlags/statsig/integration.ts
+++ b/packages/browser/src/integrations/featureFlags/statsig/integration.ts
@@ -1,10 +1,10 @@
-import type { Client, Event, EventHint, IntegrationFn } from '@sentry/core';
+import type { Client, Event, EventHint, IntegrationFn } from '@sentry/core/browser';
import {
_INTERNAL_addFeatureFlagToActiveSpan,
_INTERNAL_copyFlagsFromScopeToEvent,
_INTERNAL_insertFlagToScope,
defineIntegration,
-} from '@sentry/core';
+} from '@sentry/core/browser';
import type { FeatureGate, StatsigClient } from './types';
/**
diff --git a/packages/browser/src/integrations/featureFlags/unleash/integration.ts b/packages/browser/src/integrations/featureFlags/unleash/integration.ts
index c822b49f8810..b9421efe0d37 100644
--- a/packages/browser/src/integrations/featureFlags/unleash/integration.ts
+++ b/packages/browser/src/integrations/featureFlags/unleash/integration.ts
@@ -1,4 +1,4 @@
-import type { Client, Event, EventHint, IntegrationFn } from '@sentry/core';
+import type { Client, Event, EventHint, IntegrationFn } from '@sentry/core/browser';
import {
_INTERNAL_addFeatureFlagToActiveSpan,
_INTERNAL_copyFlagsFromScopeToEvent,
@@ -6,7 +6,7 @@ import {
debug,
defineIntegration,
fill,
-} from '@sentry/core';
+} from '@sentry/core/browser';
import { DEBUG_BUILD } from '../../../debug-build';
import type { UnleashClient, UnleashClientClass } from './types';
diff --git a/packages/browser/src/integrations/globalhandlers.ts b/packages/browser/src/integrations/globalhandlers.ts
index 70b3516b63b1..00deac133889 100644
--- a/packages/browser/src/integrations/globalhandlers.ts
+++ b/packages/browser/src/integrations/globalhandlers.ts
@@ -1,4 +1,4 @@
-import type { Client, Event, IntegrationFn, Primitive, StackParser } from '@sentry/core';
+import type { Client, Event, IntegrationFn, Primitive, StackParser } from '@sentry/core/browser';
import {
addGlobalErrorInstrumentationHandler,
addGlobalUnhandledRejectionInstrumentationHandler,
@@ -11,7 +11,7 @@ import {
isString,
stripDataUrlContent,
UNKNOWN_FUNCTION,
-} from '@sentry/core';
+} from '@sentry/core/browser';
import type { BrowserClient } from '../client';
import { DEBUG_BUILD } from '../debug-build';
import { eventFromUnknownInput } from '../eventbuilder';
diff --git a/packages/browser/src/integrations/graphqlClient.ts b/packages/browser/src/integrations/graphqlClient.ts
index 51a3fe939f23..74b1b425aaa3 100644
--- a/packages/browser/src/integrations/graphqlClient.ts
+++ b/packages/browser/src/integrations/graphqlClient.ts
@@ -1,4 +1,4 @@
-import type { Client, IntegrationFn } from '@sentry/core';
+import type { Client, IntegrationFn } from '@sentry/core/browser';
import {
defineIntegration,
isString,
@@ -7,7 +7,7 @@ import {
SEMANTIC_ATTRIBUTE_URL_FULL,
spanToJSON,
stringMatchesSomePattern,
-} from '@sentry/core';
+} from '@sentry/core/browser';
import type { FetchHint, XhrHint } from '@sentry-internal/browser-utils';
import { getBodyString, getFetchRequestArgBody, SENTRY_XHR_DATA_KEY } from '@sentry-internal/browser-utils';
diff --git a/packages/browser/src/integrations/httpclient.ts b/packages/browser/src/integrations/httpclient.ts
index 76f32158f496..c2152f0ab78d 100644
--- a/packages/browser/src/integrations/httpclient.ts
+++ b/packages/browser/src/integrations/httpclient.ts
@@ -1,4 +1,4 @@
-import type { Client, Event as SentryEvent, IntegrationFn, SentryWrappedXMLHttpRequest } from '@sentry/core';
+import type { Client, Event as SentryEvent, IntegrationFn, SentryWrappedXMLHttpRequest } from '@sentry/core/browser';
import {
addExceptionMechanism,
addFetchInstrumentationHandler,
@@ -9,7 +9,7 @@ import {
GLOBAL_OBJ,
isSentryRequestUrl,
supportsNativeFetch,
-} from '@sentry/core';
+} from '@sentry/core/browser';
import { addXhrInstrumentationHandler, SENTRY_XHR_DATA_KEY } from '@sentry-internal/browser-utils';
import { DEBUG_BUILD } from '../debug-build';
diff --git a/packages/browser/src/integrations/httpcontext.ts b/packages/browser/src/integrations/httpcontext.ts
index bc331ed1e0f8..3a7623496a65 100644
--- a/packages/browser/src/integrations/httpcontext.ts
+++ b/packages/browser/src/integrations/httpcontext.ts
@@ -1,4 +1,4 @@
-import { defineIntegration, safeSetSpanJSONAttributes } from '@sentry/core';
+import { defineIntegration, safeSetSpanJSONAttributes } from '@sentry/core/browser';
import { getHttpRequestData, WINDOW } from '../helpers';
/**
diff --git a/packages/browser/src/integrations/linkederrors.ts b/packages/browser/src/integrations/linkederrors.ts
index d0eace11aadb..64b7f5a3edbd 100644
--- a/packages/browser/src/integrations/linkederrors.ts
+++ b/packages/browser/src/integrations/linkederrors.ts
@@ -1,5 +1,5 @@
-import type { IntegrationFn } from '@sentry/core';
-import { applyAggregateErrorsToEvent, defineIntegration } from '@sentry/core';
+import type { IntegrationFn } from '@sentry/core/browser';
+import { applyAggregateErrorsToEvent, defineIntegration } from '@sentry/core/browser';
import { exceptionFromError } from '../eventbuilder';
interface LinkedErrorsOptions {
diff --git a/packages/browser/src/integrations/reportingobserver.ts b/packages/browser/src/integrations/reportingobserver.ts
index 571dca9db839..6e06e15646c0 100644
--- a/packages/browser/src/integrations/reportingobserver.ts
+++ b/packages/browser/src/integrations/reportingobserver.ts
@@ -1,4 +1,4 @@
-import type { Client, IntegrationFn } from '@sentry/core';
+import type { Client, IntegrationFn } from '@sentry/core/browser';
import {
captureMessage,
defineIntegration,
@@ -6,7 +6,7 @@ import {
GLOBAL_OBJ,
supportsReportingObserver,
withScope,
-} from '@sentry/core';
+} from '@sentry/core/browser';
const WINDOW = GLOBAL_OBJ as typeof GLOBAL_OBJ & Window;
diff --git a/packages/browser/src/integrations/spanstreaming.ts b/packages/browser/src/integrations/spanstreaming.ts
index ad6a35d1813b..9f82904419c6 100644
--- a/packages/browser/src/integrations/spanstreaming.ts
+++ b/packages/browser/src/integrations/spanstreaming.ts
@@ -1,4 +1,4 @@
-import type { IntegrationFn } from '@sentry/core';
+import type { IntegrationFn } from '@sentry/core/browser';
import {
captureSpan,
debug,
@@ -7,7 +7,7 @@ import {
isStreamedBeforeSendSpanCallback,
SpanBuffer,
spanIsSampled,
-} from '@sentry/core';
+} from '@sentry/core/browser';
import { DEBUG_BUILD } from '../debug-build';
export const spanStreamingIntegration = defineIntegration(() => {
diff --git a/packages/browser/src/integrations/spotlight.ts b/packages/browser/src/integrations/spotlight.ts
index 4c04b16ed63b..629710e6154f 100644
--- a/packages/browser/src/integrations/spotlight.ts
+++ b/packages/browser/src/integrations/spotlight.ts
@@ -1,5 +1,5 @@
-import type { Client, Envelope, IntegrationFn } from '@sentry/core';
-import { debug, defineIntegration, serializeEnvelope } from '@sentry/core';
+import type { Client, Envelope, IntegrationFn } from '@sentry/core/browser';
+import { debug, defineIntegration, serializeEnvelope } from '@sentry/core/browser';
import { getNativeImplementation } from '@sentry-internal/browser-utils';
import { DEBUG_BUILD } from '../debug-build';
import type { WINDOW } from '../helpers';
diff --git a/packages/browser/src/integrations/view-hierarchy.ts b/packages/browser/src/integrations/view-hierarchy.ts
index fa35ad7e00a2..edb959eca3f9 100644
--- a/packages/browser/src/integrations/view-hierarchy.ts
+++ b/packages/browser/src/integrations/view-hierarchy.ts
@@ -1,5 +1,5 @@
-import type { Attachment, Event, EventHint, ViewHierarchyData, ViewHierarchyWindow } from '@sentry/core';
-import { defineIntegration, getComponentName } from '@sentry/core';
+import type { Attachment, Event, EventHint, ViewHierarchyData, ViewHierarchyWindow } from '@sentry/core/browser';
+import { defineIntegration, getComponentName } from '@sentry/core/browser';
import { WINDOW } from '../helpers';
interface OnElementArgs {
diff --git a/packages/browser/src/integrations/webWorker.ts b/packages/browser/src/integrations/webWorker.ts
index 147135526ec3..a79dc463278a 100644
--- a/packages/browser/src/integrations/webWorker.ts
+++ b/packages/browser/src/integrations/webWorker.ts
@@ -1,5 +1,5 @@
-import type { DebugImage, Integration, IntegrationFn } from '@sentry/core';
-import { captureEvent, debug, defineIntegration, getClient, isPlainObject, isPrimitive } from '@sentry/core';
+import type { DebugImage, Integration, IntegrationFn } from '@sentry/core/browser';
+import { captureEvent, debug, defineIntegration, getClient, isPlainObject, isPrimitive } from '@sentry/core/browser';
import { DEBUG_BUILD } from '../debug-build';
import { eventFromUnknownInput } from '../eventbuilder';
import { WINDOW } from '../helpers';
diff --git a/packages/browser/src/pluggable-exports-bundle/index.multiplexedtransport.ts b/packages/browser/src/pluggable-exports-bundle/index.multiplexedtransport.ts
index a7d637d9e62f..a0b137cb1a53 100644
--- a/packages/browser/src/pluggable-exports-bundle/index.multiplexedtransport.ts
+++ b/packages/browser/src/pluggable-exports-bundle/index.multiplexedtransport.ts
@@ -1 +1 @@
-export { makeMultiplexedTransport } from '@sentry/core';
+export { makeMultiplexedTransport } from '@sentry/core/browser';
diff --git a/packages/browser/src/profiling/UIProfiler.ts b/packages/browser/src/profiling/UIProfiler.ts
index 932b442a4b6e..e64f7720710b 100644
--- a/packages/browser/src/profiling/UIProfiler.ts
+++ b/packages/browser/src/profiling/UIProfiler.ts
@@ -1,4 +1,4 @@
-import type { Client, ContinuousProfiler, ProfileChunk, ProfileChunkEnvelope, Span } from '@sentry/core';
+import type { Client, ContinuousProfiler, ProfileChunk, ProfileChunkEnvelope, Span } from '@sentry/core/browser';
import {
createEnvelope,
debug,
@@ -7,11 +7,17 @@ import {
getRootSpan,
getSdkMetadataForEnvelopeHeader,
uuid4,
-} from '@sentry/core';
+} from '@sentry/core/browser';
import type { BrowserOptions } from '../client';
import { DEBUG_BUILD } from './../debug-build';
import type { JSSelfProfiler } from './jsSelfProfiling';
-import { createProfileChunkPayload, shouldProfileSession, startJSSelfProfile, validateProfileChunk } from './utils';
+import {
+ createProfileChunkPayload,
+ setThreadAttributes,
+ shouldProfileSession,
+ startJSSelfProfile,
+ validateProfileChunk,
+} from './utils';
const CHUNK_INTERVAL_MS = 60_000; // 1 minute
// Maximum length for trace lifecycle profiling per root span (e.g. if spanEnd never fires)
@@ -78,6 +84,12 @@ export class UIProfiler implements ContinuousProfiler {
if (lifecycleMode === 'trace') {
this._setupTraceLifecycleListeners(client);
}
+
+ client.on('spanStart', span => {
+ if (this._isRunning) {
+ setThreadAttributes(span);
+ }
+ });
}
/** Starts UI profiling (only effective in 'manual' mode and when sampled). */
@@ -142,6 +154,10 @@ export class UIProfiler implements ContinuousProfiler {
this._beginProfiling();
}
+
+ if (this._isRunning) {
+ setThreadAttributes(rootSpan);
+ }
}
/**
diff --git a/packages/browser/src/profiling/index.ts b/packages/browser/src/profiling/index.ts
index 5847c070dd48..3f1fe9a82ed4 100644
--- a/packages/browser/src/profiling/index.ts
+++ b/packages/browser/src/profiling/index.ts
@@ -1,5 +1,5 @@
-import type { Profiler } from '@sentry/core';
-import { debug, getClient } from '@sentry/core';
+import type { Profiler } from '@sentry/core/browser';
+import { debug, getClient } from '@sentry/core/browser';
import { DEBUG_BUILD } from '../debug-build';
/**
diff --git a/packages/browser/src/profiling/integration.ts b/packages/browser/src/profiling/integration.ts
index 84cd33588320..ed438d15a7b3 100644
--- a/packages/browser/src/profiling/integration.ts
+++ b/packages/browser/src/profiling/integration.ts
@@ -1,5 +1,5 @@
-import type { EventEnvelope, IntegrationFn, Profile, Span } from '@sentry/core';
-import { debug, defineIntegration, getActiveSpan, getRootSpan, hasSpansEnabled } from '@sentry/core';
+import type { EventEnvelope, IntegrationFn, Profile, Span } from '@sentry/core/browser';
+import { debug, defineIntegration, getActiveSpan, getRootSpan, hasSpansEnabled } from '@sentry/core/browser';
import type { BrowserOptions } from '../client';
import { DEBUG_BUILD } from '../debug-build';
import { WINDOW } from '../helpers';
@@ -8,12 +8,13 @@ import { UIProfiler } from './UIProfiler';
import type { ProfiledEvent } from './utils';
import {
addProfilesToEnvelope,
- attachProfiledThreadToEvent,
createProfilingEvent,
findProfiledTransactionsFromEnvelope,
getActiveProfilesCount,
hasLegacyProfiling,
isAutomatedPageLoadSpan,
+ PROFILED_ROOT_SPANS,
+ setThreadAttributes,
shouldProfileSpanLegacy,
takeProfileFromGlobalCache,
} from './utils';
@@ -92,8 +93,13 @@ const _browserProfilingIntegration = (() => {
}
client.on('spanStart', (span: Span) => {
- if (span === getRootSpan(span) && shouldProfileSpanLegacy(span)) {
- startProfileForSpan(span);
+ const rootSpan = getRootSpan(span);
+ if (span === rootSpan) {
+ if (shouldProfileSpanLegacy(span)) {
+ startProfileForSpan(span);
+ }
+ } else if (PROFILED_ROOT_SPANS.has(rootSpan)) {
+ setThreadAttributes(span);
}
});
@@ -151,9 +157,6 @@ const _browserProfilingIntegration = (() => {
});
}
},
- processEvent(event) {
- return attachProfiledThreadToEvent(event);
- },
};
}) satisfies IntegrationFn;
diff --git a/packages/browser/src/profiling/startProfileForSpan.ts b/packages/browser/src/profiling/startProfileForSpan.ts
index 6eaaa016d822..974cf9e8e9b2 100644
--- a/packages/browser/src/profiling/startProfileForSpan.ts
+++ b/packages/browser/src/profiling/startProfileForSpan.ts
@@ -1,9 +1,16 @@
-import type { Span } from '@sentry/core';
-import { debug, getCurrentScope, spanToJSON, timestampInSeconds, uuid4 } from '@sentry/core';
+import type { Span } from '@sentry/core/browser';
+import { debug, getCurrentScope, spanToJSON, timestampInSeconds, uuid4 } from '@sentry/core/browser';
import { DEBUG_BUILD } from '../debug-build';
import { WINDOW } from '../helpers';
import type { JSSelfProfile } from './jsSelfProfiling';
-import { addProfileToGlobalCache, isAutomatedPageLoadSpan, MAX_PROFILE_DURATION_MS, startJSSelfProfile } from './utils';
+import {
+ addProfileToGlobalCache,
+ isAutomatedPageLoadSpan,
+ MAX_PROFILE_DURATION_MS,
+ PROFILED_ROOT_SPANS,
+ setThreadAttributes,
+ startJSSelfProfile,
+} from './utils';
/**
* Wraps startTransaction and stopTransaction with profiling related logic.
@@ -48,6 +55,9 @@ export function startProfileForSpan(span: Span): void {
start_timestamp: startTimestamp,
});
+ PROFILED_ROOT_SPANS.add(span);
+ setThreadAttributes(span);
+
/**
* Idempotent handler for profile stop
*/
diff --git a/packages/browser/src/profiling/utils.ts b/packages/browser/src/profiling/utils.ts
index f0d067c841d8..0d7d413428dd 100644
--- a/packages/browser/src/profiling/utils.ts
+++ b/packages/browser/src/profiling/utils.ts
@@ -10,7 +10,7 @@ import type {
ProfileChunk,
Span,
ThreadCpuProfile,
-} from '@sentry/core';
+} from '@sentry/core/browser';
import {
browserPerformanceTimeOrigin,
debug,
@@ -22,7 +22,7 @@ import {
spanToJSON,
timestampInSeconds,
uuid4,
-} from '@sentry/core';
+} from '@sentry/core/browser';
import type { BrowserOptions } from '../client';
import { DEBUG_BUILD } from '../debug-build';
import { WINDOW } from '../helpers';
@@ -786,39 +786,9 @@ export function addProfileToGlobalCache(profile_id: string, profile: JSSelfProfi
}
}
-/**
- * Attaches the profiled thread information to the event's trace context.
- */
-export function attachProfiledThreadToEvent(event: Event): Event {
- if (!event?.contexts?.profile) {
- return event;
- }
-
- if (!event.contexts) {
- return event;
- }
-
- // Only mutate the trace context when it already has a trace_id — that
- // guarantees `applySpanToEvent` has already run, and we are not creating a partial trace context from scratch.
- if (event.contexts.trace?.trace_id) {
- event.contexts.trace = {
- ...event.contexts.trace,
- data: {
- ...(event.contexts.trace.data ?? {}),
- ['thread.id']: PROFILER_THREAD_ID_STRING,
- ['thread.name']: PROFILER_THREAD_NAME,
- },
- };
- }
-
- // Attach thread info to individual spans so that spans can be associated with the profiled thread on the UI even if contexts are missing.
- event.spans?.forEach(span => {
- span.data = {
- ...(span.data || {}),
- ['thread.id']: PROFILER_THREAD_ID_STRING,
- ['thread.name']: PROFILER_THREAD_NAME,
- };
- });
+export const PROFILED_ROOT_SPANS = new WeakSet();
- return event;
+export function setThreadAttributes(span: Span): void {
+ span.setAttribute('thread.id', PROFILER_THREAD_ID_STRING);
+ span.setAttribute('thread.name', PROFILER_THREAD_NAME);
}
diff --git a/packages/browser/src/report-dialog.ts b/packages/browser/src/report-dialog.ts
index 03255a7db91d..beb6f7ec9c08 100644
--- a/packages/browser/src/report-dialog.ts
+++ b/packages/browser/src/report-dialog.ts
@@ -1,5 +1,5 @@
-import type { ReportDialogOptions } from '@sentry/core';
-import { debug, getClient, getCurrentScope, getReportDialogEndpoint, lastEventId } from '@sentry/core';
+import type { ReportDialogOptions } from '@sentry/core/browser';
+import { debug, getClient, getCurrentScope, getReportDialogEndpoint, lastEventId } from '@sentry/core/browser';
import { DEBUG_BUILD } from './debug-build';
import { WINDOW } from './helpers';
diff --git a/packages/browser/src/sdk.ts b/packages/browser/src/sdk.ts
index 8a457f858f3c..68a48ec461d1 100644
--- a/packages/browser/src/sdk.ts
+++ b/packages/browser/src/sdk.ts
@@ -1,4 +1,4 @@
-import type { Client, Integration, Options } from '@sentry/core';
+import type { Client, Integration, Options } from '@sentry/core/browser';
import {
conversationIdIntegration,
dedupeIntegration,
@@ -7,7 +7,7 @@ import {
inboundFiltersIntegration,
initAndBind,
stackParserFromStackParserOptions,
-} from '@sentry/core';
+} from '@sentry/core/browser';
import type { BrowserClientOptions, BrowserOptions } from './client';
import { BrowserClient } from './client';
import { breadcrumbsIntegration } from './integrations/breadcrumbs';
diff --git a/packages/browser/src/stack-parsers.ts b/packages/browser/src/stack-parsers.ts
index cb74bc1e6ce6..9214dac42707 100644
--- a/packages/browser/src/stack-parsers.ts
+++ b/packages/browser/src/stack-parsers.ts
@@ -23,8 +23,8 @@
// CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
// OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-import type { StackFrame, StackLineParser, StackLineParserFn } from '@sentry/core';
-import { createStackParser, UNKNOWN_FUNCTION } from '@sentry/core';
+import type { StackFrame, StackLineParser, StackLineParserFn } from '@sentry/core/browser';
+import { createStackParser, UNKNOWN_FUNCTION } from '@sentry/core/browser';
const OPERA10_PRIORITY = 10;
const OPERA11_PRIORITY = 20;
diff --git a/packages/browser/src/tracing/backgroundtab.ts b/packages/browser/src/tracing/backgroundtab.ts
index f8aeca761d85..c484bc0e52c2 100644
--- a/packages/browser/src/tracing/backgroundtab.ts
+++ b/packages/browser/src/tracing/backgroundtab.ts
@@ -1,4 +1,4 @@
-import { debug, getActiveSpan, getRootSpan, SPAN_STATUS_ERROR, spanToJSON } from '@sentry/core';
+import { debug, getActiveSpan, getRootSpan, SPAN_STATUS_ERROR, spanToJSON } from '@sentry/core/browser';
import { DEBUG_BUILD } from '../debug-build';
import { WINDOW } from '../helpers';
diff --git a/packages/browser/src/tracing/browserTracingIntegration.ts b/packages/browser/src/tracing/browserTracingIntegration.ts
index 34a9609099ad..08acf99761fc 100644
--- a/packages/browser/src/tracing/browserTracingIntegration.ts
+++ b/packages/browser/src/tracing/browserTracingIntegration.ts
@@ -7,7 +7,7 @@ import type {
Span,
StartSpanOptions,
TransactionSource,
-} from '@sentry/core';
+} from '@sentry/core/browser';
import {
addNonEnumerableProperty,
browserPerformanceTimeOrigin,
@@ -36,7 +36,7 @@ import {
startInactiveSpan,
timestampInSeconds,
TRACING_DEFAULTS,
-} from '@sentry/core';
+} from '@sentry/core/browser';
import {
addHistoryInstrumentationHandler,
addPerformanceEntries,
diff --git a/packages/browser/src/tracing/linkedTraces.ts b/packages/browser/src/tracing/linkedTraces.ts
index 4768ac7c66a1..8b13ff99fb35 100644
--- a/packages/browser/src/tracing/linkedTraces.ts
+++ b/packages/browser/src/tracing/linkedTraces.ts
@@ -1,4 +1,4 @@
-import type { Client, PropagationContext, Span, SpanContextData } from '@sentry/core';
+import type { Client, PropagationContext, Span, SpanContextData } from '@sentry/core/browser';
import {
debug,
getCurrentScope,
@@ -7,7 +7,7 @@ import {
SEMANTIC_ATTRIBUTE_SENTRY_SAMPLE_RATE,
SEMANTIC_LINK_ATTRIBUTE_LINK_TYPE,
spanToJSON,
-} from '@sentry/core';
+} from '@sentry/core/browser';
import { DEBUG_BUILD } from '../debug-build';
import { WINDOW } from '../exports';
@@ -231,7 +231,7 @@ export function getPreviousTraceFromSessionStorage(): PreviousTraceInfo | undefi
}
/**
- * see {@link import('@sentry/core').spanIsSampled}
+ * see {@link import('@sentry/core/browser').spanIsSampled}
*/
export function spanContextSampled(ctx: SpanContextData): boolean {
return ctx.traceFlags === 0x1;
diff --git a/packages/browser/src/tracing/reportPageLoaded.ts b/packages/browser/src/tracing/reportPageLoaded.ts
index 2d3d4a4991a4..fbd5e3a49933 100644
--- a/packages/browser/src/tracing/reportPageLoaded.ts
+++ b/packages/browser/src/tracing/reportPageLoaded.ts
@@ -1,5 +1,5 @@
-import type { Client } from '@sentry/core';
-import { getClient } from '@sentry/core';
+import type { Client } from '@sentry/core/browser';
+import { getClient } from '@sentry/core/browser';
/**
* Manually report the end of the page load, resulting in the SDK ending the pageload span.
diff --git a/packages/browser/src/tracing/request.ts b/packages/browser/src/tracing/request.ts
index 9cbf45563f0b..a1a281bb54dc 100644
--- a/packages/browser/src/tracing/request.ts
+++ b/packages/browser/src/tracing/request.ts
@@ -7,7 +7,7 @@ import type {
SentryWrappedXMLHttpRequest,
Span,
SpanTimeInput,
-} from '@sentry/core';
+} from '@sentry/core/browser';
import {
addFetchEndInstrumentationHandler,
addFetchInstrumentationHandler,
@@ -29,7 +29,7 @@ import {
stripDataUrlContent,
stripUrlQueryAndFragment,
timestampInSeconds,
-} from '@sentry/core';
+} from '@sentry/core/browser';
import type { XhrHint } from '@sentry-internal/browser-utils';
import {
addPerformanceInstrumentationHandler,
diff --git a/packages/browser/src/tracing/setActiveSpan.ts b/packages/browser/src/tracing/setActiveSpan.ts
index 5e3b537d4b6d..be1cb52adc06 100644
--- a/packages/browser/src/tracing/setActiveSpan.ts
+++ b/packages/browser/src/tracing/setActiveSpan.ts
@@ -1,5 +1,5 @@
-import type { Span } from '@sentry/core';
-import { _INTERNAL_setSpanForScope, getActiveSpan, getCurrentScope } from '@sentry/core';
+import type { Span } from '@sentry/core/browser';
+import { _INTERNAL_setSpanForScope, getActiveSpan, getCurrentScope } from '@sentry/core/browser';
/**
* Sets an inactive span active on the current scope.
diff --git a/packages/browser/src/transports/fetch.ts b/packages/browser/src/transports/fetch.ts
index f6683c7005de..f5b3f2d780e8 100644
--- a/packages/browser/src/transports/fetch.ts
+++ b/packages/browser/src/transports/fetch.ts
@@ -1,5 +1,5 @@
-import type { Transport, TransportMakeRequestResponse, TransportRequest } from '@sentry/core';
-import { createTransport, makePromiseBuffer } from '@sentry/core';
+import type { Transport, TransportMakeRequestResponse, TransportRequest } from '@sentry/core/browser';
+import { createTransport, makePromiseBuffer } from '@sentry/core/browser';
import { clearCachedImplementation, getNativeImplementation } from '@sentry-internal/browser-utils';
import type { WINDOW } from '../helpers';
import type { BrowserTransportOptions } from './types';
diff --git a/packages/browser/src/transports/offline.ts b/packages/browser/src/transports/offline.ts
index c644e72fbf39..c965e3aa5c15 100644
--- a/packages/browser/src/transports/offline.ts
+++ b/packages/browser/src/transports/offline.ts
@@ -1,5 +1,11 @@
-import type { BaseTransportOptions, Envelope, OfflineStore, OfflineTransportOptions, Transport } from '@sentry/core';
-import { makeOfflineTransport, parseEnvelope, serializeEnvelope } from '@sentry/core';
+import type {
+ BaseTransportOptions,
+ Envelope,
+ OfflineStore,
+ OfflineTransportOptions,
+ Transport,
+} from '@sentry/core/browser';
+import { makeOfflineTransport, parseEnvelope, serializeEnvelope } from '@sentry/core/browser';
import { WINDOW } from '../helpers';
import { makeFetchTransport } from './fetch';
diff --git a/packages/browser/src/transports/types.ts b/packages/browser/src/transports/types.ts
index a304e9f93d66..e2a00e9a12d8 100644
--- a/packages/browser/src/transports/types.ts
+++ b/packages/browser/src/transports/types.ts
@@ -1,4 +1,4 @@
-import type { BaseTransportOptions } from '@sentry/core';
+import type { BaseTransportOptions } from '@sentry/core/browser';
export interface BrowserTransportOptions extends BaseTransportOptions {
/** Fetch API init parameters. Used by the FetchTransport */
diff --git a/packages/browser/src/userfeedback.ts b/packages/browser/src/userfeedback.ts
index bc6fd248b480..f77699dd1c75 100644
--- a/packages/browser/src/userfeedback.ts
+++ b/packages/browser/src/userfeedback.ts
@@ -1,5 +1,5 @@
-import type { DsnComponents, EventEnvelope, SdkMetadata, UserFeedback, UserFeedbackItem } from '@sentry/core';
-import { createEnvelope, dsnToString } from '@sentry/core';
+import type { DsnComponents, EventEnvelope, SdkMetadata, UserFeedback, UserFeedbackItem } from '@sentry/core/browser';
+import { createEnvelope, dsnToString } from '@sentry/core/browser';
/**
* Creates an envelope from a user feedback.
diff --git a/packages/browser/src/utils/detectBrowserExtension.ts b/packages/browser/src/utils/detectBrowserExtension.ts
index 95ad7cebcf06..86ba33ba7727 100644
--- a/packages/browser/src/utils/detectBrowserExtension.ts
+++ b/packages/browser/src/utils/detectBrowserExtension.ts
@@ -1,4 +1,4 @@
-import { consoleSandbox, getLocationHref } from '@sentry/core';
+import { consoleSandbox, getLocationHref } from '@sentry/core/browser';
import { DEBUG_BUILD } from '../debug-build';
import { WINDOW } from '../helpers';
diff --git a/packages/browser/src/utils/lazyLoadIntegration.ts b/packages/browser/src/utils/lazyLoadIntegration.ts
index a24da592faff..f348681adba5 100644
--- a/packages/browser/src/utils/lazyLoadIntegration.ts
+++ b/packages/browser/src/utils/lazyLoadIntegration.ts
@@ -1,5 +1,5 @@
-import type { IntegrationFn } from '@sentry/core';
-import { getClient, SDK_VERSION } from '@sentry/core';
+import type { IntegrationFn } from '@sentry/core/browser';
+import { getClient, SDK_VERSION } from '@sentry/core/browser';
import type { BrowserClient } from '../client';
import { WINDOW } from '../helpers';
diff --git a/packages/browser/test/client.test.ts b/packages/browser/test/client.test.ts
index 27135b4fc9a9..9861c9e428ec 100644
--- a/packages/browser/test/client.test.ts
+++ b/packages/browser/test/client.test.ts
@@ -2,14 +2,14 @@
* @vitest-environment jsdom
*/
-import * as sentryCore from '@sentry/core';
-import { Scope } from '@sentry/core';
+import * as sentryCore from '@sentry/core/browser';
+import { Scope } from '@sentry/core/browser';
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import { applyDefaultOptions, BrowserClient } from '../src/client';
import { WINDOW } from '../src/helpers';
import { getDefaultBrowserClientOptions } from './helper/browser-client-options';
-vi.mock('@sentry/core', async requireActual => {
+vi.mock('@sentry/core/browser', async requireActual => {
return {
...((await requireActual()) as any),
_INTERNAL_flushLogsBuffer: vi.fn(),
diff --git a/packages/browser/test/diagnose-sdk.test.ts b/packages/browser/test/diagnose-sdk.test.ts
index 85b60047361e..6161891424b2 100644
--- a/packages/browser/test/diagnose-sdk.test.ts
+++ b/packages/browser/test/diagnose-sdk.test.ts
@@ -2,13 +2,13 @@
* @vitest-environment jsdom
*/
-import type { Client } from '@sentry/core';
-import * as sentryCore from '@sentry/core';
+import type { Client } from '@sentry/core/browser';
+import * as sentryCore from '@sentry/core/browser';
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import { diagnoseSdkConnectivity } from '../src/diagnose-sdk';
// Mock the @sentry/core module
-vi.mock('@sentry/core', async requireActual => {
+vi.mock('@sentry/core/browser', async requireActual => {
return {
...((await requireActual()) as any),
getClient: vi.fn(),
diff --git a/packages/browser/test/eventbuilder.test.ts b/packages/browser/test/eventbuilder.test.ts
index ef233ed58a1f..bc99c9c95e49 100644
--- a/packages/browser/test/eventbuilder.test.ts
+++ b/packages/browser/test/eventbuilder.test.ts
@@ -2,12 +2,12 @@
* @vitest-environment jsdom
*/
-import { addNonEnumerableProperty } from '@sentry/core';
+import { addNonEnumerableProperty } from '@sentry/core/browser';
import { afterEach, describe, expect, it, vi } from 'vitest';
import { defaultStackParser } from '../src';
import { eventFromMessage, eventFromUnknownInput, extractMessage, extractType } from '../src/eventbuilder';
-vi.mock('@sentry/core', async requireActual => {
+vi.mock('@sentry/core/browser', async requireActual => {
return {
...((await requireActual()) as any),
getClient() {
diff --git a/packages/browser/test/helper/browser-client-options.ts b/packages/browser/test/helper/browser-client-options.ts
index 237f3cec0e05..825bfb47a672 100644
--- a/packages/browser/test/helper/browser-client-options.ts
+++ b/packages/browser/test/helper/browser-client-options.ts
@@ -1,4 +1,4 @@
-import { createTransport, resolvedSyncPromise } from '@sentry/core';
+import { createTransport, resolvedSyncPromise } from '@sentry/core/browser';
import type { BrowserClientOptions } from '../../src/client';
export function getDefaultBrowserClientOptions(options: Partial = {}): BrowserClientOptions {
diff --git a/packages/browser/test/helpers.test.ts b/packages/browser/test/helpers.test.ts
index 3031ceae19aa..9a14db35c872 100644
--- a/packages/browser/test/helpers.test.ts
+++ b/packages/browser/test/helpers.test.ts
@@ -1,4 +1,4 @@
-import type { WrappedFunction } from '@sentry/core';
+import type { WrappedFunction } from '@sentry/core/browser';
import { describe, expect, it, vi } from 'vitest';
import { wrap } from '../src/helpers';
diff --git a/packages/browser/test/index.bundle.logs.metrics.test.ts b/packages/browser/test/index.bundle.logs.metrics.test.ts
index 7d450dc1ced0..31d4254b9433 100644
--- a/packages/browser/test/index.bundle.logs.metrics.test.ts
+++ b/packages/browser/test/index.bundle.logs.metrics.test.ts
@@ -1,4 +1,4 @@
-import { logger as coreLogger, metrics as coreMetrics } from '@sentry/core';
+import { logger as coreLogger, metrics as coreMetrics } from '@sentry/core/browser';
import { spanStreamingIntegrationShim } from '@sentry-internal/integration-shims';
import { describe, expect, it } from 'vitest';
import * as LogsMetricsBundle from '../src/index.bundle.logs.metrics';
diff --git a/packages/browser/test/index.bundle.replay.logs.metrics.test.ts b/packages/browser/test/index.bundle.replay.logs.metrics.test.ts
index d6bb995fae09..5e39b4111b63 100644
--- a/packages/browser/test/index.bundle.replay.logs.metrics.test.ts
+++ b/packages/browser/test/index.bundle.replay.logs.metrics.test.ts
@@ -1,4 +1,4 @@
-import { logger as coreLogger, metrics as coreMetrics } from '@sentry/core';
+import { logger as coreLogger, metrics as coreMetrics } from '@sentry/core/browser';
import {
browserTracingIntegrationShim,
feedbackIntegrationShim,
diff --git a/packages/browser/test/index.bundle.tracing.logs.metrics.test.ts b/packages/browser/test/index.bundle.tracing.logs.metrics.test.ts
index 483a4ae8a1f5..65cc58fbf028 100644
--- a/packages/browser/test/index.bundle.tracing.logs.metrics.test.ts
+++ b/packages/browser/test/index.bundle.tracing.logs.metrics.test.ts
@@ -1,4 +1,4 @@
-import { logger as coreLogger, metrics as coreMetrics } from '@sentry/core';
+import { logger as coreLogger, metrics as coreMetrics } from '@sentry/core/browser';
import { feedbackIntegrationShim, replayIntegrationShim } from '@sentry-internal/integration-shims';
import { describe, expect, it } from 'vitest';
import { browserTracingIntegration, spanStreamingIntegration } from '../src';
diff --git a/packages/browser/test/index.bundle.tracing.replay.feedback.logs.metrics.test.ts b/packages/browser/test/index.bundle.tracing.replay.feedback.logs.metrics.test.ts
index 0c474b195bc8..e4b88fab24d7 100644
--- a/packages/browser/test/index.bundle.tracing.replay.feedback.logs.metrics.test.ts
+++ b/packages/browser/test/index.bundle.tracing.replay.feedback.logs.metrics.test.ts
@@ -1,4 +1,4 @@
-import { logger as coreLogger, metrics as coreMetrics } from '@sentry/core';
+import { logger as coreLogger, metrics as coreMetrics } from '@sentry/core/browser';
import { describe, expect, it } from 'vitest';
import {
browserTracingIntegration,
diff --git a/packages/browser/test/index.bundle.tracing.replay.logs.metrics.test.ts b/packages/browser/test/index.bundle.tracing.replay.logs.metrics.test.ts
index 4848de24caea..f8571872ba95 100644
--- a/packages/browser/test/index.bundle.tracing.replay.logs.metrics.test.ts
+++ b/packages/browser/test/index.bundle.tracing.replay.logs.metrics.test.ts
@@ -1,4 +1,4 @@
-import { logger as coreLogger, metrics as coreMetrics } from '@sentry/core';
+import { logger as coreLogger, metrics as coreMetrics } from '@sentry/core/browser';
import { feedbackIntegrationShim } from '@sentry-internal/integration-shims';
import { describe, expect, it } from 'vitest';
import { browserTracingIntegration, replayIntegration, spanStreamingIntegration } from '../src';
diff --git a/packages/browser/test/index.test.ts b/packages/browser/test/index.test.ts
index 9a30596832e4..b662352bf3e3 100644
--- a/packages/browser/test/index.test.ts
+++ b/packages/browser/test/index.test.ts
@@ -9,8 +9,8 @@ import {
getReportDialogEndpoint,
lastEventId,
SDK_VERSION,
-} from '@sentry/core';
-import * as utils from '@sentry/core';
+} from '@sentry/core/browser';
+import * as utils from '@sentry/core/browser';
import type { Mock } from 'vitest';
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import {
@@ -37,7 +37,7 @@ const dsn = 'https://53039209a22b4ec1bcc296a3c9fdecd6@sentry.io/4291';
// eslint-disable-next-line no-var
declare var global: any;
-vi.mock('@sentry/core', async requireActual => {
+vi.mock('@sentry/core/browser', async requireActual => {
return {
...((await requireActual()) as any),
getReportDialogEndpoint: vi.fn(),
diff --git a/packages/browser/test/integrations/breadcrumbs.test.ts b/packages/browser/test/integrations/breadcrumbs.test.ts
index 56df65c3a6ae..3ee91da9be13 100644
--- a/packages/browser/test/integrations/breadcrumbs.test.ts
+++ b/packages/browser/test/integrations/breadcrumbs.test.ts
@@ -1,4 +1,4 @@
-import * as SentryCore from '@sentry/core';
+import * as SentryCore from '@sentry/core/browser';
import { describe, expect, it, vi } from 'vitest';
import { breadcrumbsIntegration, BrowserClient, flush } from '../../src';
import { getDefaultBrowserClientOptions } from '../helper/browser-client-options';
diff --git a/packages/browser/test/integrations/contextlines.test.ts b/packages/browser/test/integrations/contextlines.test.ts
index e34479cd644e..ffb062759d9c 100644
--- a/packages/browser/test/integrations/contextlines.test.ts
+++ b/packages/browser/test/integrations/contextlines.test.ts
@@ -1,4 +1,4 @@
-import type { StackFrame } from '@sentry/core';
+import type { StackFrame } from '@sentry/core/browser';
import { describe, expect, it } from 'vitest';
import { applySourceContextToFrame } from '../../src/integrations/contextlines';
diff --git a/packages/browser/test/integrations/graphqlClient.test.ts b/packages/browser/test/integrations/graphqlClient.test.ts
index 1c4ab60d30f2..0ff1fb43161f 100644
--- a/packages/browser/test/integrations/graphqlClient.test.ts
+++ b/packages/browser/test/integrations/graphqlClient.test.ts
@@ -2,8 +2,8 @@
* @vitest-environment jsdom
*/
-import type { Client } from '@sentry/core';
-import { SentrySpan, spanToJSON } from '@sentry/core';
+import type { Client } from '@sentry/core/browser';
+import { SentrySpan, spanToJSON } from '@sentry/core/browser';
import type { FetchHint, XhrHint } from '@sentry-internal/browser-utils';
import { SENTRY_XHR_DATA_KEY } from '@sentry-internal/browser-utils';
import { describe, expect, test } from 'vitest';
diff --git a/packages/browser/test/integrations/reportingobserver.test.ts b/packages/browser/test/integrations/reportingobserver.test.ts
index 4376977a1b1a..18ef38b7a097 100644
--- a/packages/browser/test/integrations/reportingobserver.test.ts
+++ b/packages/browser/test/integrations/reportingobserver.test.ts
@@ -1,5 +1,5 @@
-import type { Client } from '@sentry/core';
-import * as SentryCore from '@sentry/core';
+import type { Client } from '@sentry/core/browser';
+import * as SentryCore from '@sentry/core/browser';
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import { reportingObserverIntegration } from '../../src/integrations/reportingobserver';
diff --git a/packages/browser/test/integrations/spanstreaming.test.ts b/packages/browser/test/integrations/spanstreaming.test.ts
index 1d5d587290a3..cc51ef6e6c17 100644
--- a/packages/browser/test/integrations/spanstreaming.test.ts
+++ b/packages/browser/test/integrations/spanstreaming.test.ts
@@ -1,5 +1,5 @@
-import * as SentryCore from '@sentry/core';
-import { debug } from '@sentry/core';
+import * as SentryCore from '@sentry/core/browser';
+import { debug } from '@sentry/core/browser';
import { beforeEach, describe, expect, it, vi } from 'vitest';
import { BrowserClient, spanStreamingIntegration } from '../../src';
import { getDefaultBrowserClientOptions } from '../helper/browser-client-options';
@@ -15,8 +15,8 @@ const MockSpanBuffer = vi.hoisted(() => {
return vi.fn(() => mockSpanBufferInstance);
});
-vi.mock('@sentry/core', async () => {
- const original = await vi.importActual('@sentry/core');
+vi.mock('@sentry/core/browser', async () => {
+ const original = await vi.importActual('@sentry/core/browser');
return {
...original,
SpanBuffer: MockSpanBuffer,
diff --git a/packages/browser/test/integrations/webWorker.test.ts b/packages/browser/test/integrations/webWorker.test.ts
index c239e31bd638..e824486456da 100644
--- a/packages/browser/test/integrations/webWorker.test.ts
+++ b/packages/browser/test/integrations/webWorker.test.ts
@@ -2,13 +2,13 @@
* @vitest-environment jsdom
*/
-import * as SentryCore from '@sentry/core';
+import * as SentryCore from '@sentry/core/browser';
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import * as helpers from '../../src/helpers';
import { INTEGRATION_NAME, registerWebWorker, webWorkerIntegration } from '../../src/integrations/webWorker';
// Mock @sentry/core
-vi.mock('@sentry/core', async importActual => {
+vi.mock('@sentry/core/browser', async importActual => {
return {
...((await importActual()) as any),
debug: {
diff --git a/packages/browser/test/mocks/simpletransport.ts b/packages/browser/test/mocks/simpletransport.ts
index 515b0f9c0865..9f2f527fba7d 100644
--- a/packages/browser/test/mocks/simpletransport.ts
+++ b/packages/browser/test/mocks/simpletransport.ts
@@ -1,4 +1,4 @@
-import { createTransport, resolvedSyncPromise } from '@sentry/core';
+import { createTransport, resolvedSyncPromise } from '@sentry/core/browser';
export function makeSimpleTransport() {
return createTransport({ recordDroppedEvent: () => undefined }, () => resolvedSyncPromise({}));
diff --git a/packages/browser/test/profiling/UIProfiler.test.ts b/packages/browser/test/profiling/UIProfiler.test.ts
index 456c5c222b22..19c72dffcacd 100644
--- a/packages/browser/test/profiling/UIProfiler.test.ts
+++ b/packages/browser/test/profiling/UIProfiler.test.ts
@@ -3,7 +3,7 @@
*/
import * as Sentry from '@sentry/browser';
-import { debug, type Span } from '@sentry/core';
+import { debug, type Span } from '@sentry/core/browser';
import { afterEach, beforeEach, describe, expect, it, type Mock, vi } from 'vitest';
import type { BrowserOptions } from '../../src/index';
@@ -583,6 +583,100 @@ describe('Browser Profiling v2 trace lifecycle', () => {
});
});
+ describe('thread attributes', () => {
+ it('sets thread.id and thread.name on root span', async () => {
+ vi.useRealTimers();
+ mockProfiler();
+ const send = vi.fn().mockResolvedValue(undefined);
+
+ Sentry.init({
+ ...getBaseOptionsForTraceLifecycle(send),
+ });
+
+ Sentry.startSpan({ name: 'root-thread-attrs', parentSpan: null, forceTransaction: true }, () => {
+ /* empty */
+ });
+
+ const client = Sentry.getClient();
+ await client?.flush(1000);
+
+ const calls = send.mock.calls;
+ const txnCall = calls.find(call => call?.[0]?.[1]?.[0]?.[0]?.type === 'transaction');
+ const transaction = txnCall?.[0]?.[1]?.[0]?.[1];
+
+ expect(transaction.transaction).toBe('root-thread-attrs');
+ expect(transaction.contexts.trace.data['thread.id']).toBe('0');
+ expect(transaction.contexts.trace.data['thread.name']).toBe('main');
+ });
+
+ it('sets thread.id and thread.name on child spans', async () => {
+ vi.useRealTimers();
+ mockProfiler();
+ const send = vi.fn().mockResolvedValue(undefined);
+
+ Sentry.init({
+ ...getBaseOptionsForTraceLifecycle(send),
+ });
+
+ Sentry.startSpan({ name: 'root-with-children', parentSpan: null, forceTransaction: true }, () => {
+ Sentry.startSpan({ name: 'child-span-1' }, () => {
+ /* empty */
+ });
+ Sentry.startSpan({ name: 'child-span-2' }, () => {
+ /* empty */
+ });
+ });
+
+ const client = Sentry.getClient();
+ await client?.flush(1000);
+
+ const calls = send.mock.calls;
+ const txnCall = calls.find(call => call?.[0]?.[1]?.[0]?.[0]?.type === 'transaction');
+ const transaction = txnCall?.[0]?.[1]?.[0]?.[1];
+
+ expect(transaction.transaction).toBe('root-with-children');
+ expect(transaction.spans).toHaveLength(2);
+ for (const span of transaction.spans) {
+ expect(span.data['thread.id']).toBe('0');
+ expect(span.data['thread.name']).toBe('main');
+ }
+ });
+
+ it('does not set thread attributes when session is not sampled', async () => {
+ vi.useRealTimers();
+ mockProfiler();
+ const send = vi.fn().mockResolvedValue(undefined);
+
+ Sentry.init({
+ ...getBaseOptionsForTraceLifecycle(send),
+ profileSessionSampleRate: 0,
+ });
+
+ Sentry.startSpan({ name: 'unsampled-root', parentSpan: null, forceTransaction: true }, () => {
+ Sentry.startSpan({ name: 'unsampled-child' }, () => {
+ /* empty */
+ });
+ });
+
+ const client = Sentry.getClient();
+ await client?.flush(1000);
+
+ const calls = send.mock.calls;
+ const txnCall = calls.find(call => call?.[0]?.[1]?.[0]?.[0]?.type === 'transaction');
+ expect(txnCall).toBeDefined();
+
+ const transaction = txnCall?.[0]?.[1]?.[0]?.[1];
+ expect(transaction.transaction).toBe('unsampled-root');
+ expect(transaction.contexts.trace.data['thread.id']).toBeUndefined();
+ expect(transaction.contexts.trace.data['thread.name']).toBeUndefined();
+
+ expect(transaction.spans).toHaveLength(1);
+ expect(transaction.spans[0].description).toBe('unsampled-child');
+ expect(transaction.spans[0].data['thread.id']).toBeUndefined();
+ expect(transaction.spans[0].data['thread.name']).toBeUndefined();
+ });
+ });
+
it('calling start and stop in trace lifecycle prints warnings', async () => {
const { stop } = mockProfiler();
const send = vi.fn().mockResolvedValue(undefined);
@@ -848,4 +942,83 @@ describe('Browser Profiling v2 manual lifecycle', () => {
expect(firstProfilerId).toBe(thirdProfilerId); // same profiler_id across session
});
});
+
+ describe('thread attributes', () => {
+ it('sets thread.id and thread.name on spans created while profiling is active', async () => {
+ vi.useRealTimers();
+ mockProfiler();
+ const send = vi.fn().mockResolvedValue(undefined);
+
+ Sentry.init({
+ ...getBaseOptionsForManualLifecycle(send),
+ });
+
+ Sentry.uiProfiler.startProfiler();
+
+ Sentry.startSpan({ name: 'manual-profiled-root', parentSpan: null, forceTransaction: true }, () => {
+ Sentry.startSpan({ name: 'manual-profiled-child' }, () => {
+ /* empty */
+ });
+ });
+
+ Sentry.uiProfiler.stopProfiler();
+ await Promise.resolve();
+
+ const client = Sentry.getClient();
+ await client?.flush(1000);
+
+ const calls = send.mock.calls;
+ const txnCall = calls.find(call => call?.[0]?.[1]?.[0]?.[0]?.type === 'transaction');
+ const transaction = txnCall?.[0]?.[1]?.[0]?.[1];
+
+ expect(transaction.transaction).toBe('manual-profiled-root');
+ expect(transaction.contexts.trace.data['thread.id']).toBe('0');
+ expect(transaction.contexts.trace.data['thread.name']).toBe('main');
+
+ expect(transaction.spans).toHaveLength(1);
+ expect(transaction.spans[0].description).toBe('manual-profiled-child');
+ expect(transaction.spans[0].data['thread.id']).toBe('0');
+ expect(transaction.spans[0].data['thread.name']).toBe('main');
+ });
+
+ it('does not set thread attributes on spans created outside of profiling window', async () => {
+ vi.useRealTimers();
+ mockProfiler();
+ const send = vi.fn().mockResolvedValue(undefined);
+
+ Sentry.init({
+ ...getBaseOptionsForManualLifecycle(send),
+ });
+
+ // Create span BEFORE profiling starts
+ Sentry.startSpan({ name: 'before-profiling', parentSpan: null, forceTransaction: true }, () => {
+ /* empty */
+ });
+
+ Sentry.uiProfiler.startProfiler();
+ Sentry.uiProfiler.stopProfiler();
+ await Promise.resolve();
+
+ // Create span AFTER profiling stops
+ Sentry.startSpan({ name: 'after-profiling', parentSpan: null, forceTransaction: true }, () => {
+ /* empty */
+ });
+
+ const client = Sentry.getClient();
+ await client?.flush(1000);
+
+ const calls = send.mock.calls;
+ const txnCalls = calls.filter(call => call?.[0]?.[1]?.[0]?.[0]?.type === 'transaction');
+ const transactions = txnCalls.map(call => call?.[0]?.[1]?.[0]?.[1]);
+
+ expect(transactions).toHaveLength(2);
+ expect(transactions[0].transaction).toBe('before-profiling');
+ expect(transactions[1].transaction).toBe('after-profiling');
+
+ for (const transaction of transactions) {
+ expect(transaction.contexts.trace.data['thread.id']).toBeUndefined();
+ expect(transaction.contexts.trace.data['thread.name']).toBeUndefined();
+ }
+ });
+ });
});
diff --git a/packages/browser/test/profiling/integration.test.ts b/packages/browser/test/profiling/integration.test.ts
index a08db412ccec..b281c54c578c 100644
--- a/packages/browser/test/profiling/integration.test.ts
+++ b/packages/browser/test/profiling/integration.test.ts
@@ -3,7 +3,7 @@
*/
import * as Sentry from '@sentry/browser';
-import { debug } from '@sentry/core';
+import { debug } from '@sentry/core/browser';
import { describe, expect, it, vi } from 'vitest';
import type { BrowserClient } from '../../src/index';
import type { JSSelfProfile } from '../../src/profiling/jsSelfProfiling';
@@ -108,4 +108,64 @@ describe('BrowserProfilingIntegration', () => {
const lifecycle = client?.getOptions()?.profileLifecycle;
expect(lifecycle).toBe('manual');
});
+
+ describe('legacy profiling thread attributes', () => {
+ it('sets thread.id and thread.name on root span and child spans', async () => {
+ class MockProfiler {
+ stopped: boolean = false;
+ constructor(_opts: { sampleInterval: number; maxBufferSize: number }) {}
+ stop() {
+ this.stopped = true;
+ return Promise.resolve({
+ frames: [{ name: 'test_fn', line: 1, column: 1 }],
+ stacks: [{ frameId: 0, parentId: undefined }],
+ samples: [
+ { stackId: 0, timestamp: 0 },
+ { stackId: 0, timestamp: 100 },
+ ],
+ resources: [],
+ } as JSSelfProfile);
+ }
+ }
+
+ // @ts-expect-error this is a mock constructor
+ window.Profiler = MockProfiler;
+
+ const send = vi.fn().mockResolvedValue(undefined);
+ const client = Sentry.init({
+ tracesSampleRate: 1,
+ profilesSampleRate: 1,
+ dsn: 'https://7fa19397baaf433f919fbe02228d5470@o1137848.ingest.sentry.io/6625302',
+ transport: _opts => ({
+ flush: vi.fn().mockResolvedValue(true),
+ send,
+ }),
+ integrations: [Sentry.browserProfilingIntegration()],
+ });
+
+ Sentry.startSpan({ name: 'legacy-root', parentSpan: null, forceTransaction: true }, () => {
+ Sentry.startSpan({ name: 'legacy-child' }, () => {
+ /* empty */
+ });
+ });
+
+ await client!.flush(1000);
+
+ const txnCall = send.mock.calls.find(call => call?.[0]?.[1]?.[0]?.[0]?.type === 'transaction');
+ expect(txnCall).toBeDefined();
+
+ const transaction = txnCall?.[0]?.[1]?.[0]?.[1];
+
+ // Root span thread attributes are in contexts.trace.data
+ expect(transaction.contexts.trace.data['thread.id']).toBe('0');
+ expect(transaction.contexts.trace.data['thread.name']).toBe('main');
+
+ // Child span thread attributes
+ expect(transaction.spans).toHaveLength(1);
+ expect(transaction.spans[0].data['thread.id']).toBe('0');
+ expect(transaction.spans[0].data['thread.name']).toBe('main');
+
+ (window as any).Profiler = undefined;
+ });
+ });
});
diff --git a/packages/browser/test/sdk.test.ts b/packages/browser/test/sdk.test.ts
index b7972797182f..3d84da69e565 100644
--- a/packages/browser/test/sdk.test.ts
+++ b/packages/browser/test/sdk.test.ts
@@ -3,9 +3,9 @@
*/
/* eslint-disable @typescript-eslint/unbound-method */
-import type { Integration } from '@sentry/core';
-import * as SentryCore from '@sentry/core';
-import { createTransport, resolvedSyncPromise } from '@sentry/core';
+import type { Integration } from '@sentry/core/browser';
+import * as SentryCore from '@sentry/core/browser';
+import { createTransport, resolvedSyncPromise } from '@sentry/core/browser';
import type { Mock } from 'vitest';
import { afterEach, describe, expect, it, test, vi } from 'vitest';
import type { BrowserOptions } from '../src';
diff --git a/packages/browser/test/tracekit/ie.test.ts b/packages/browser/test/tracekit/ie.test.ts
index 7bf463d39f33..ad2328ac6e26 100644
--- a/packages/browser/test/tracekit/ie.test.ts
+++ b/packages/browser/test/tracekit/ie.test.ts
@@ -1,4 +1,4 @@
-import { createStackParser } from '@sentry/core';
+import { createStackParser } from '@sentry/core/browser';
import { describe, expect, it } from 'vitest';
import { exceptionFromError } from '../../src/eventbuilder';
import { chromeStackLineParser, geckoStackLineParser, winjsStackLineParser } from '../../src/stack-parsers';
diff --git a/packages/browser/test/tracekit/opera.test.ts b/packages/browser/test/tracekit/opera.test.ts
index d109fb39ac9c..5c85ebe8efec 100644
--- a/packages/browser/test/tracekit/opera.test.ts
+++ b/packages/browser/test/tracekit/opera.test.ts
@@ -1,4 +1,4 @@
-import { createStackParser } from '@sentry/core';
+import { createStackParser } from '@sentry/core/browser';
import { describe, expect, it } from 'vitest';
import { exceptionFromError } from '../../src/eventbuilder';
import { defaultStackParser, opera10StackLineParser, opera11StackLineParser } from '../../src/stack-parsers';
diff --git a/packages/browser/test/tracing/backgroundtab.test.ts b/packages/browser/test/tracing/backgroundtab.test.ts
index 8d76e895848f..44e827fc9e64 100644
--- a/packages/browser/test/tracing/backgroundtab.test.ts
+++ b/packages/browser/test/tracing/backgroundtab.test.ts
@@ -2,7 +2,7 @@
* @vitest-environment jsdom
*/
-import { getCurrentScope, setCurrentClient } from '@sentry/core';
+import { getCurrentScope, setCurrentClient } from '@sentry/core/browser';
import { JSDOM } from 'jsdom';
import { TextDecoder, TextEncoder } from 'util';
import { afterAll, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
diff --git a/packages/browser/test/tracing/browserTracingIntegration.test.ts b/packages/browser/test/tracing/browserTracingIntegration.test.ts
index 83f00a09092a..59c79d98bb73 100644
--- a/packages/browser/test/tracing/browserTracingIntegration.test.ts
+++ b/packages/browser/test/tracing/browserTracingIntegration.test.ts
@@ -2,7 +2,7 @@
* @vitest-environment jsdom
*/
-import type { Span, StartSpanOptions } from '@sentry/core';
+import type { Span, StartSpanOptions } from '@sentry/core/browser';
import {
getActiveSpan,
getCurrentScope,
@@ -19,7 +19,7 @@ import {
TRACING_DEFAULTS,
browserPerformanceTimeOrigin,
getSpanDescendants,
-} from '@sentry/core';
+} from '@sentry/core/browser';
import { JSDOM } from 'jsdom';
import { TextDecoder, TextEncoder } from 'util';
import { afterAll, afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
diff --git a/packages/browser/test/tracing/linkedTraces.test.ts b/packages/browser/test/tracing/linkedTraces.test.ts
index 56616c6a7692..9426a67dec08 100644
--- a/packages/browser/test/tracing/linkedTraces.test.ts
+++ b/packages/browser/test/tracing/linkedTraces.test.ts
@@ -1,5 +1,5 @@
-import type { Span } from '@sentry/core';
-import { addChildSpanToSpan, debug, SentrySpan, spanToJSON, timestampInSeconds } from '@sentry/core';
+import type { Span } from '@sentry/core/browser';
+import { addChildSpanToSpan, debug, SentrySpan, spanToJSON, timestampInSeconds } from '@sentry/core/browser';
import { beforeEach, describe, expect, it, vi } from 'vitest';
import { BrowserClient } from '../../src';
import type { PreviousTraceInfo } from '../../src/tracing/linkedTraces';
diff --git a/packages/browser/test/tracing/request.test.ts b/packages/browser/test/tracing/request.test.ts
index 1674a96d1937..100f1aa9ab30 100644
--- a/packages/browser/test/tracing/request.test.ts
+++ b/packages/browser/test/tracing/request.test.ts
@@ -1,5 +1,5 @@
-import type { Client } from '@sentry/core';
-import * as utils from '@sentry/core';
+import type { Client } from '@sentry/core/browser';
+import * as utils from '@sentry/core/browser';
import * as browserUtils from '@sentry-internal/browser-utils';
import type { MockInstance } from 'vitest';
import { afterEach, beforeAll, beforeEach, describe, expect, it, vi } from 'vitest';
diff --git a/packages/browser/test/tracing/setActiveSpan.test.ts b/packages/browser/test/tracing/setActiveSpan.test.ts
index d3c7ea79cf67..ed9926a01c62 100644
--- a/packages/browser/test/tracing/setActiveSpan.test.ts
+++ b/packages/browser/test/tracing/setActiveSpan.test.ts
@@ -1,4 +1,4 @@
-import { getActiveSpan, SentrySpan } from '@sentry/core';
+import { getActiveSpan, SentrySpan } from '@sentry/core/browser';
import { describe, expect, it } from 'vitest';
import { setActiveSpanInBrowser } from '../../src';
diff --git a/packages/browser/test/transports/fetch.test.ts b/packages/browser/test/transports/fetch.test.ts
index d330fe886d5f..5461bdd9d6f9 100644
--- a/packages/browser/test/transports/fetch.test.ts
+++ b/packages/browser/test/transports/fetch.test.ts
@@ -1,5 +1,5 @@
-import type { EventEnvelope, EventItem } from '@sentry/core';
-import { createEnvelope, serializeEnvelope } from '@sentry/core';
+import type { EventEnvelope, EventItem } from '@sentry/core/browser';
+import { createEnvelope, serializeEnvelope } from '@sentry/core/browser';
import type { Mock } from 'vitest';
import { afterEach, describe, expect, it, vi } from 'vitest';
import { makeFetchTransport } from '../../src/transports/fetch';
diff --git a/packages/browser/test/transports/offline.test.ts b/packages/browser/test/transports/offline.test.ts
index 993d4977ac21..f007c553f1af 100644
--- a/packages/browser/test/transports/offline.test.ts
+++ b/packages/browser/test/transports/offline.test.ts
@@ -4,8 +4,8 @@ import type {
EventItem,
InternalBaseTransportOptions,
TransportMakeRequestResponse,
-} from '@sentry/core';
-import { createEnvelope, createTransport } from '@sentry/core';
+} from '@sentry/core/browser';
+import { createEnvelope, createTransport } from '@sentry/core/browser';
import { TextDecoder, TextEncoder } from 'util';
import { beforeAll, describe, expect, it } from 'vitest';
import { createStore, makeBrowserOfflineTransport, push, shift, unshift } from '../../src/transports/offline';
diff --git a/packages/browser/tsconfig.json b/packages/browser/tsconfig.json
index b80e9ddbfaa5..8efb233764b7 100644
--- a/packages/browser/tsconfig.json
+++ b/packages/browser/tsconfig.json
@@ -4,6 +4,7 @@
"include": ["src/**/*", "test/loader.js"],
"compilerOptions": {
+ "moduleResolution": "bundler",
"lib": ["DOM", "es2020", "WebWorker"]
}
}
diff --git a/packages/cloudflare/src/workflows.ts b/packages/cloudflare/src/workflows.ts
index be9b363715eb..e77772c73783 100644
--- a/packages/cloudflare/src/workflows.ts
+++ b/packages/cloudflare/src/workflows.ts
@@ -63,9 +63,9 @@ async function propagationContextFromInstanceId(instanceId: string): Promise>(
@@ -112,7 +112,7 @@ class WrappedWorkflowStep implements WorkflowStep {
captureException(error, { mechanism: { handled: true, type: 'auto.faas.cloudflare.workflow' } });
throw error;
} finally {
- this._ctx.waitUntil(flush(2000));
+ this._waitUntil(flush(2000));
}
},
);
@@ -175,7 +175,8 @@ export function instrumentWorkflowWithSentry<
setAsyncLocalStorageAsyncContextStrategy();
return withIsolationScope(async isolationScope => {
- const client = init({ ...options, enableDedupe: false });
+ const waitUntil = context.waitUntil.bind(context);
+ const client = init({ ...options, ctx: context, enableDedupe: false });
isolationScope.setClient(client);
addCloudResourceContext(isolationScope);
@@ -188,10 +189,10 @@ export function instrumentWorkflowWithSentry<
return await obj.run.call(
obj,
event,
- new WrappedWorkflowStep(event.instanceId, context, options, step),
+ new WrappedWorkflowStep(event.instanceId, options, step, waitUntil),
);
} finally {
- context.waitUntil(flushAndDispose(client));
+ waitUntil(flushAndDispose(client));
}
});
});
diff --git a/packages/cloudflare/test/workflow.test.ts b/packages/cloudflare/test/workflow.test.ts
index 18dee2c09cfd..008486697b5b 100644
--- a/packages/cloudflare/test/workflow.test.ts
+++ b/packages/cloudflare/test/workflow.test.ts
@@ -74,6 +74,31 @@ const INSTANCE_ID = 'ae0ee067-61b3-4852-9219-5d62282270f0';
const SAMPLE_RAND = '0.44116884107728693';
const TRACE_ID = INSTANCE_ID.replace(/-/g, '');
+async function drainWaitUntilLikeCloudflareVitestPool(
+ waitUntilPromises: Promise[],
+ timeoutMs = 100,
+): Promise {
+ while (waitUntilPromises.length > 0) {
+ const batch = waitUntilPromises.splice(0);
+ let timeoutId: ReturnType | undefined;
+
+ const result = await Promise.race([
+ Promise.allSettled(batch).then(() => 'settled' as const),
+ new Promise<'timed-out'>(resolve => {
+ timeoutId = setTimeout(() => resolve('timed-out'), timeoutMs);
+ }),
+ ]);
+
+ if (timeoutId) {
+ clearTimeout(timeoutId);
+ }
+
+ if (result === 'timed-out') {
+ throw new Error('Cloudflare Vitest worker pool timed out while draining waitUntil promises');
+ }
+ }
+}
+
describe.skipIf(NODE_MAJOR_VERSION < 20)('workflows', () => {
beforeEach(() => {
vi.clearAllMocks();
@@ -152,6 +177,35 @@ describe.skipIf(NODE_MAJOR_VERSION < 20)('workflows', () => {
]);
});
+ test('workflow step and final flush waitUntil promises can be drained by the Cloudflare Vitest worker pool', async () => {
+ const waitUntilPromises: Promise[] = [];
+ const context: ExecutionContext = {
+ waitUntil: vi.fn((promise: Promise) => {
+ waitUntilPromises.push(promise);
+ }),
+ passThroughOnException: vi.fn(),
+ props: {},
+ };
+
+ class WaitUntilWorkflow {
+ public constructor(private _ctx: ExecutionContext) {}
+
+ public async run(_event: Readonly>, step: WorkflowStep): Promise {
+ await step.do('waitUntil step', async () => {
+ this._ctx.waitUntil(new Promise(resolve => setTimeout(resolve, 0)));
+ });
+ }
+ }
+
+ const TestWorkflowInstrumented = instrumentWorkflowWithSentry(getSentryOptions, WaitUntilWorkflow as any);
+ const workflow = new TestWorkflowInstrumented(context, {}) as WaitUntilWorkflow;
+ const event = { payload: {}, timestamp: new Date(), instanceId: INSTANCE_ID };
+
+ await workflow.run(event, mockStep);
+
+ await expect(drainWaitUntilLikeCloudflareVitestPool(waitUntilPromises)).resolves.toBeUndefined();
+ });
+
test('Wraps env with instrumentEnv', async () => {
class EnvTestWorkflow {
constructor(_ctx: ExecutionContext, _env: unknown) {}
diff --git a/packages/core/browser.js b/packages/core/browser.js
new file mode 100644
index 000000000000..6485bf1fe914
--- /dev/null
+++ b/packages/core/browser.js
@@ -0,0 +1,3 @@
+// This file is a compatibility shim for bundlers (e.g. webpack 4) that do not
+// support the package.json `exports` field for resolving subpath exports.
+module.exports = require('./build/cjs/browser.js');
diff --git a/packages/core/package.json b/packages/core/package.json
index cf7a9fb42282..6f1bacf40e59 100644
--- a/packages/core/package.json
+++ b/packages/core/package.json
@@ -10,13 +10,35 @@
"node": ">=18"
},
"files": [
- "/build"
+ "/build",
+ "browser.js",
+ "server.js"
],
"main": "build/cjs/index.js",
"module": "build/esm/index.js",
"types": "build/types/index.d.ts",
"exports": {
"./package.json": "./package.json",
+ "./server": {
+ "import": {
+ "types": "./build/types/server.d.ts",
+ "default": "./build/esm/server.js"
+ },
+ "require": {
+ "types": "./build/types/server.d.ts",
+ "default": "./build/cjs/server.js"
+ }
+ },
+ "./browser": {
+ "import": {
+ "types": "./build/types/browser.d.ts",
+ "default": "./build/esm/browser.js"
+ },
+ "require": {
+ "types": "./build/types/browser.d.ts",
+ "default": "./build/cjs/browser.js"
+ }
+ },
".": {
"import": {
"types": "./build/types/index.d.ts",
diff --git a/packages/core/rollup.npm.config.mjs b/packages/core/rollup.npm.config.mjs
index cc3ad4064820..610de17fadf3 100644
--- a/packages/core/rollup.npm.config.mjs
+++ b/packages/core/rollup.npm.config.mjs
@@ -14,26 +14,31 @@ if (!packageJson.version) {
const packageVersion = packageJson.version;
+const settings = {
+ packageSpecificConfig: {
+ output: {
+ // set exports to 'named' or 'auto' so that rollup doesn't warn
+ exports: 'named',
+ // set preserveModules to true because we don't want to bundle everything into one file.
+ preserveModules:
+ process.env.SENTRY_BUILD_PRESERVE_MODULES === undefined
+ ? true
+ : Boolean(process.env.SENTRY_BUILD_PRESERVE_MODULES),
+ },
+ plugins: [
+ replace({
+ preventAssignment: true,
+ values: {
+ __SENTRY_SDK_VERSION__: JSON.stringify(packageVersion),
+ },
+ }),
+ ],
+ },
+};
+
export default makeNPMConfigVariants(
makeBaseNPMConfig({
- packageSpecificConfig: {
- output: {
- // set exports to 'named' or 'auto' so that rollup doesn't warn
- exports: 'named',
- // set preserveModules to true because we don't want to bundle everything into one file.
- preserveModules:
- process.env.SENTRY_BUILD_PRESERVE_MODULES === undefined
- ? true
- : Boolean(process.env.SENTRY_BUILD_PRESERVE_MODULES),
- },
- plugins: [
- replace({
- preventAssignment: true,
- values: {
- __SENTRY_SDK_VERSION__: JSON.stringify(packageVersion),
- },
- }),
- ],
- },
+ ...settings,
+ entrypoints: ['src/index.ts', 'src/server.ts', 'src/browser.ts'],
}),
);
diff --git a/packages/core/server.js b/packages/core/server.js
new file mode 100644
index 000000000000..cc8162c1c03c
--- /dev/null
+++ b/packages/core/server.js
@@ -0,0 +1,3 @@
+// This file is a compatibility shim for bundlers (e.g. webpack 4) that do not
+// support the package.json `exports` field for resolving subpath exports.
+module.exports = require('./build/cjs/server.js');
diff --git a/packages/core/src/browser-exports.ts b/packages/core/src/browser-exports.ts
new file mode 100644
index 000000000000..6750899da1ed
--- /dev/null
+++ b/packages/core/src/browser-exports.ts
@@ -0,0 +1,16 @@
+/**
+ * Browser-specific utilities for Sentry SDKs
+ *
+ * @module
+ */
+export { getComponentName, getLocationHref, htmlTreeAsString } from './utils/browser';
+export { supportsDOMError, supportsHistory, supportsNativeFetch, supportsReportingObserver } from './utils/supports';
+export type { XhrBreadcrumbData, XhrBreadcrumbHint } from './types-hoist/breadcrumb';
+export type {
+ HandlerDataXhr,
+ HandlerDataDom,
+ HandlerDataHistory,
+ SentryXhrData,
+ SentryWrappedXMLHttpRequest,
+} from './types-hoist/instrument';
+export type { BrowserClientReplayOptions, BrowserClientProfilingOptions } from './types-hoist/browseroptions';
diff --git a/packages/core/src/browser.ts b/packages/core/src/browser.ts
new file mode 100644
index 000000000000..a0ba2b75ef01
--- /dev/null
+++ b/packages/core/src/browser.ts
@@ -0,0 +1,4 @@
+/* eslint-disable max-lines */
+
+export * from './shared-exports';
+export * from './browser-exports';
diff --git a/packages/core/src/build-time-plugins/buildTimeOptionsBase.ts b/packages/core/src/build-time-plugins/buildTimeOptionsBase.ts
index f61aa6c40c94..c87ce1198df4 100644
--- a/packages/core/src/build-time-plugins/buildTimeOptionsBase.ts
+++ b/packages/core/src/build-time-plugins/buildTimeOptionsBase.ts
@@ -125,6 +125,15 @@ export interface BuildTimeOptionsBase {
* Options for bundle size optimizations by excluding certain features of the Sentry SDK.
*/
bundleSizeOptimizations?: BundleSizeOptimizationsOptions;
+
+ /**
+ * A key that is used to identify the application in the Sentry bundler plugins.
+ * This key is used by the `thirdPartyErrorFilterIntegration` to filter out errors
+ * originating from third-party scripts.
+ *
+ * @see https://docs.sentry.io/platforms/javascript/configuration/filtering/#using-thirdpartyerrorfilterintegration
+ */
+ applicationKey?: string;
}
/**
diff --git a/packages/core/src/client.ts b/packages/core/src/client.ts
index 766a0a4ecfdc..94cc00e19f88 100644
--- a/packages/core/src/client.ts
+++ b/packages/core/src/client.ts
@@ -12,6 +12,7 @@ import type { Scope } from './scope';
import { updateSession } from './session';
import { getDynamicSamplingContextFromScope } from './tracing/dynamicSamplingContext';
import { isStreamedBeforeSendSpanCallback } from './tracing/spans/beforeSendSpan';
+import { extractGenAiSpansFromEvent } from './tracing/spans/extractGenAiSpans';
import { DEFAULT_TRANSPORT_BUFFER_SIZE } from './transports/base';
import type { Breadcrumb, BreadcrumbHint, FetchBreadcrumbHint, XhrBreadcrumbHint } from './types-hoist/breadcrumb';
import type { CheckIn, MonitorConfig } from './types-hoist/checkin';
@@ -527,12 +528,20 @@ export abstract class Client {
public sendEvent(event: Event, hint: EventHint = {}): void {
this.emit('beforeSendEvent', event, hint);
+ // Extract gen_ai spans from transaction and convert to span v2 format.
+ // This mutates event.spans to remove the extracted spans.
+ const genAiSpanItem = extractGenAiSpansFromEvent(event, this);
+
let env = createEventEnvelope(event, this._dsn, this._options._metadata, this._options.tunnel);
for (const attachment of hint.attachments || []) {
env = addItemToEnvelope(env, createAttachmentEnvelopeItem(attachment));
}
+ if (genAiSpanItem) {
+ env = addItemToEnvelope(env, genAiSpanItem);
+ }
+
// sendEnvelope should not throw
// eslint-disable-next-line @typescript-eslint/no-floating-promises
this.sendEnvelope(env).then(sendResponse => this.emit('afterSendEvent', event, sendResponse));
diff --git a/packages/core/src/index.ts b/packages/core/src/index.ts
index 1751192d13dc..3e40ce629795 100644
--- a/packages/core/src/index.ts
+++ b/packages/core/src/index.ts
@@ -1,603 +1,3 @@
-/* eslint-disable max-lines */
-
-export type { ClientClass as SentryCoreCurrentScopes } from './sdk';
-export type { AsyncContextStrategy } from './asyncContext/types';
-export type { Carrier } from './carrier';
-export type { OfflineStore, OfflineTransportOptions } from './transports/offline';
-export type { ServerRuntimeClientOptions } from './server-runtime-client';
-export type { IntegrationIndex } from './integration';
-
-export * from './tracing';
-export * from './semanticAttributes';
-export { createEventEnvelope, createSessionEnvelope, createSpanEnvelope } from './envelope';
-export {
- captureCheckIn,
- withMonitor,
- captureException,
- captureEvent,
- captureMessage,
- lastEventId,
- close,
- flush,
- setContext,
- setExtra,
- setExtras,
- setTag,
- setTags,
- setUser,
- setConversationId,
- isInitialized,
- isEnabled,
- startSession,
- endSession,
- captureSession,
- addEventProcessor,
-} from './exports';
-export {
- getCurrentScope,
- getIsolationScope,
- getGlobalScope,
- withScope,
- withIsolationScope,
- getClient,
- getTraceContextFromScope,
- registerExternalPropagationContext,
- getExternalPropagationContext,
- hasExternalPropagationContext,
-} from './currentScopes';
-export { getDefaultCurrentScope, getDefaultIsolationScope } from './defaultScopes';
-export { setAsyncContextStrategy } from './asyncContext';
-export { getGlobalSingleton, getMainCarrier } from './carrier';
-export { makeSession, closeSession, updateSession } from './session';
-export { Scope } from './scope';
-export type { CaptureContext, ScopeContext, ScopeData } from './scope';
-export { notifyEventProcessors } from './eventProcessors';
-export { getEnvelopeEndpointWithUrlEncodedAuth, getReportDialogEndpoint, SENTRY_API_VERSION } from './api';
-export { Client } from './client';
-export { ServerRuntimeClient } from './server-runtime-client';
-export { initAndBind, setCurrentClient } from './sdk';
-export { createTransport } from './transports/base';
-export { makeOfflineTransport } from './transports/offline';
-export { makeMultiplexedTransport, MULTIPLEXED_TRANSPORT_EXTRA_KEY } from './transports/multiplexed';
-export { getIntegrationsToSetup, addIntegration, defineIntegration, installedIntegrations } from './integration';
-export {
- _INTERNAL_skipAiProviderWrapping,
- _INTERNAL_shouldSkipAiProviderWrapping,
- _INTERNAL_clearAiProviderSkips,
-} from './utils/ai/providerSkip';
-export { envToBool } from './utils/envToBool';
-export { applyScopeDataToEvent, mergeScopeData, getCombinedScopeData } from './utils/scopeData';
-export { prepareEvent } from './utils/prepareEvent';
-export type { ExclusiveEventHintOrCaptureContext } from './utils/prepareEvent';
-export { createCheckInEnvelope } from './checkin';
-export { hasSpansEnabled } from './utils/hasSpansEnabled';
-export { withStreamedSpan } from './tracing/spans/beforeSendSpan';
-export { isStreamedBeforeSendSpanCallback } from './tracing/spans/beforeSendSpan';
-export { safeSetSpanJSONAttributes } from './tracing/spans/captureSpan';
-export { isSentryRequestUrl } from './utils/isSentryRequestUrl';
-export { handleCallbackErrors } from './utils/handleCallbackErrors';
-export { parameterize, fmt } from './utils/parameterize';
-export type { HandleTunnelRequestOptions } from './utils/tunnel';
-export { handleTunnelRequest } from './utils/tunnel';
-
-export { addAutoIpAddressToSession } from './utils/ipAddress';
-// eslint-disable-next-line deprecation/deprecation
-export { addAutoIpAddressToUser } from './utils/ipAddress';
-export {
- convertSpanLinksForEnvelope,
- spanToTraceHeader,
- spanToJSON,
- spanToStreamedSpanJSON,
- spanIsSampled,
- spanToTraceContext,
- getSpanDescendants,
- getStatusMessage,
- getRootSpan,
- INTERNAL_getSegmentSpan,
- getActiveSpan,
- addChildSpanToSpan,
- spanTimeInputToSeconds,
- updateSpanName,
-} from './utils/spanUtils';
-export { _setSpanForScope as _INTERNAL_setSpanForScope } from './utils/spanOnScope';
-export { parseSampleRate } from './utils/parseSampleRate';
-export { applySdkMetadata } from './utils/sdkMetadata';
-export { getTraceData } from './utils/traceData';
-export { shouldPropagateTraceForUrl } from './utils/tracePropagationTargets';
-export { getTraceMetaTags } from './utils/meta';
-export { debounce } from './utils/debounce';
-export { makeWeakRef, derefWeakRef } from './utils/weakRef';
-export type { MaybeWeakRef } from './utils/weakRef';
-export { shouldIgnoreSpan } from './utils/should-ignore-span';
-export {
- winterCGHeadersToDict,
- winterCGRequestToRequestData,
- captureBodyFromWinterCGRequest,
- httpRequestToRequestData,
- extractQueryParamsFromUrl,
- headersToDict,
- httpHeadersToSpanAttributes,
- getMaxBodyByteLength,
- MAX_BODY_BYTE_LENGTH,
-} from './utils/request';
-export type { MaxRequestBodySize } from './utils/request';
-export { DEFAULT_ENVIRONMENT, DEV_ENVIRONMENT } from './constants';
-export { addBreadcrumb } from './breadcrumbs';
-export { functionToStringIntegration } from './integrations/functiontostring';
-// eslint-disable-next-line deprecation/deprecation
-export { inboundFiltersIntegration } from './integrations/eventFilters';
-export { eventFiltersIntegration } from './integrations/eventFilters';
-export { linkedErrorsIntegration } from './integrations/linkederrors';
-export { moduleMetadataIntegration } from './integrations/moduleMetadata';
-export { requestDataIntegration } from './integrations/requestdata';
-export { captureConsoleIntegration } from './integrations/captureconsole';
-export { patchExpressModule, setupExpressErrorHandler, expressErrorHandler } from './integrations/express/index';
-export type {
- ExpressIntegrationOptions,
- ExpressHandlerOptions,
- ExpressMiddleware,
- ExpressErrorMiddleware,
-} from './integrations/express/types';
-export { dedupeIntegration } from './integrations/dedupe';
-export { extraErrorDataIntegration } from './integrations/extraerrordata';
-export { rewriteFramesIntegration } from './integrations/rewriteframes';
-export { supabaseIntegration, instrumentSupabaseClient } from './integrations/supabase';
-export { instrumentPostgresJsSql } from './integrations/postgresjs';
-export { zodErrorsIntegration } from './integrations/zoderrors';
-export { thirdPartyErrorFilterIntegration } from './integrations/third-party-errors-filter';
-export { consoleIntegration } from './integrations/console';
-export type { FeatureFlagsIntegration } from './integrations/featureFlags';
-export { featureFlagsIntegration } from './integrations/featureFlags';
-export { growthbookIntegration } from './integrations/featureFlags';
-export { conversationIdIntegration } from './integrations/conversationId';
-export { patchHttpModuleClient } from './integrations/http/client-patch';
-export { getHttpClientSubscriptions } from './integrations/http/client-subscriptions';
-export { addOutgoingRequestBreadcrumb } from './integrations/http/add-outgoing-request-breadcrumb';
-export {
- getRequestUrl,
- getRequestUrlObject,
- getRequestUrlFromClientRequest,
- getRequestOptions,
-} from './integrations/http/get-request-url';
-export { HTTP_ON_CLIENT_REQUEST, HTTP_ON_SERVER_REQUEST } from './integrations/http/constants';
-export type {
- HttpInstrumentationOptions,
- HttpClientRequest,
- HttpIncomingMessage,
- HttpServerResponse,
- HttpModuleExport,
-} from './integrations/http/types';
-
-export { profiler } from './profiling';
-// eslint thinks the entire function is deprecated (while only one overload is actually deprecated)
-// Therefore:
-// eslint-disable-next-line deprecation/deprecation
-export { instrumentFetchRequest, _INTERNAL_getTracingHeadersForFetchRequest } from './fetch';
-export { trpcMiddleware } from './trpc';
-export { wrapMcpServerWithSentry } from './integrations/mcp-server';
-export { captureFeedback } from './feedback';
-export type { ReportDialogOptions } from './report-dialog';
-export { _INTERNAL_captureLog, _INTERNAL_flushLogsBuffer, _INTERNAL_captureSerializedLog } from './logs/internal';
-export * as logger from './logs/public-api';
-export { consoleLoggingIntegration } from './logs/console-integration';
-export {
- _INTERNAL_captureMetric,
- _INTERNAL_flushMetricsBuffer,
- _INTERNAL_captureSerializedMetric,
-} from './metrics/internal';
-export * as metrics from './metrics/public-api';
-export type { MetricOptions } from './metrics/public-api';
-export { createConsolaReporter } from './integrations/consola';
-export { addVercelAiProcessors } from './tracing/vercel-ai';
-export { _INTERNAL_getSpanContextForToolCallId, _INTERNAL_cleanupToolCallSpanContext } from './tracing/vercel-ai/utils';
-export { toolCallSpanContextMap as _INTERNAL_toolCallSpanContextMap } from './tracing/vercel-ai/constants';
-export { instrumentOpenAiClient } from './tracing/openai';
-export { OPENAI_INTEGRATION_NAME } from './tracing/openai/constants';
-export { instrumentAnthropicAiClient } from './tracing/anthropic-ai';
-export { ANTHROPIC_AI_INTEGRATION_NAME } from './tracing/anthropic-ai/constants';
-export { instrumentGoogleGenAIClient } from './tracing/google-genai';
-export { GOOGLE_GENAI_INTEGRATION_NAME } from './tracing/google-genai/constants';
-export type { GoogleGenAIResponse } from './tracing/google-genai/types';
-export { createLangChainCallbackHandler, instrumentLangChainEmbeddings } from './tracing/langchain';
-export { LANGCHAIN_INTEGRATION_NAME } from './tracing/langchain/constants';
-export type { LangChainOptions, LangChainIntegration } from './tracing/langchain/types';
-export { instrumentStateGraphCompile, instrumentCreateReactAgent, instrumentLangGraph } from './tracing/langgraph';
-export { LANGGRAPH_INTEGRATION_NAME } from './tracing/langgraph/constants';
-export type { LangGraphOptions, LangGraphIntegration, CompiledGraph } from './tracing/langgraph/types';
-export type { OpenAiClient, OpenAiOptions, InstrumentedMethod } from './tracing/openai/types';
-export type {
- AnthropicAiClient,
- AnthropicAiOptions,
- AnthropicAiInstrumentedMethod,
- AnthropicAiResponse,
-} from './tracing/anthropic-ai/types';
-export type {
- GoogleGenAIClient,
- GoogleGenAIChat,
- GoogleGenAIOptions,
- GoogleGenAIInstrumentedMethod,
-} from './tracing/google-genai/types';
-// eslint-disable-next-line deprecation/deprecation
-export type { GoogleGenAIIstrumentedMethod } from './tracing/google-genai/types';
-
-export { SpanBuffer } from './tracing/spans/spanBuffer';
-export { hasSpanStreamingEnabled } from './tracing/spans/hasSpanStreamingEnabled';
-export { spanStreamingIntegration } from './integrations/spanStreaming';
-
-export type { FeatureFlag } from './utils/featureFlags';
-
-export {
- _INTERNAL_copyFlagsFromScopeToEvent,
- _INTERNAL_insertFlagToScope,
- _INTERNAL_addFeatureFlagToActiveSpan,
- _INTERNAL_FLAG_BUFFER_SIZE,
- _INTERNAL_MAX_FLAGS_PER_SPAN,
-} from './utils/featureFlags';
-
-export { applyAggregateErrorsToEvent } from './utils/aggregate-errors';
-export { getBreadcrumbLogLevelFromHttpStatusCode } from './utils/breadcrumb-log-level';
-export { getComponentName, getLocationHref, htmlTreeAsString } from './utils/browser';
-export { dsnFromString, dsnToString, makeDsn } from './utils/dsn';
-// eslint-disable-next-line deprecation/deprecation
-export { SentryError } from './utils/error';
-export { GLOBAL_OBJ } from './utils/worldwide';
-export type { InternalGlobal } from './utils/worldwide';
-export { addConsoleInstrumentationHandler } from './instrument/console';
-export { addFetchEndInstrumentationHandler, addFetchInstrumentationHandler } from './instrument/fetch';
-export { addGlobalErrorInstrumentationHandler } from './instrument/globalError';
-export { addGlobalUnhandledRejectionInstrumentationHandler } from './instrument/globalUnhandledRejection';
-export { addHandler, maybeInstrument, resetInstrumentationHandlers, triggerHandlers } from './instrument/handlers';
-export {
- isDOMError,
- isDOMException,
- isElement,
- isError,
- isErrorEvent,
- isEvent,
- isInstanceOf,
- isParameterizedString,
- isPlainObject,
- isPrimitive,
- isRegExp,
- isString,
- isSyntheticEvent,
- isThenable,
- isVueViewModel,
-} from './utils/is';
-export { isBrowser } from './utils/isBrowser';
-export { CONSOLE_LEVELS, consoleSandbox, debug, originalConsoleMethods } from './utils/debug-logger';
-export type { SentryDebugLogger } from './utils/debug-logger';
-export {
- addContextToFrame,
- addExceptionMechanism,
- addExceptionTypeValue,
- checkOrSetAlreadyCaught,
- isAlreadyCaptured,
- getEventDescription,
- parseSemver,
- uuid4,
-} from './utils/misc';
-export { isNodeEnv, loadModule } from './utils/node';
-export { normalize, normalizeToSize, normalizeUrlToBase } from './utils/normalize';
-export { setNormalizationDepthOverrideHint, setSkipNormalizationHint } from './utils/normalizationHints';
-export {
- addNonEnumerableProperty,
- convertToPlainObject,
- // eslint-disable-next-line deprecation/deprecation
- dropUndefinedKeys,
- extractExceptionKeysForMessage,
- fill,
- getOriginalFunction,
- markFunctionWrapped,
- objectify,
-} from './utils/object';
-export { basename, dirname, isAbsolute, join, normalizePath, relative, resolve } from './utils/path';
-export { makePromiseBuffer, SENTRY_BUFFER_FULL_ERROR } from './utils/promisebuffer';
-export type { PromiseBuffer } from './utils/promisebuffer';
-export { severityLevelFromString } from './utils/severity';
-export { replaceExports } from './utils/exports';
-export {
- UNKNOWN_FUNCTION,
- createStackParser,
- getFramesFromEvent,
- getFunctionName,
- stackParserFromStackParserOptions,
- stripSentryFramesAndReverse,
-} from './utils/stacktrace';
-export { filenameIsInApp, node, nodeStackLineParser } from './utils/node-stack-trace';
-export { isMatchingPattern, safeJoin, snipLine, stringMatchesSomePattern, truncate } from './utils/string';
-export {
- isNativeFunction,
- supportsDOMError,
- supportsDOMException,
- supportsErrorEvent,
- // eslint-disable-next-line deprecation/deprecation
- supportsFetch,
- supportsHistory,
- supportsNativeFetch,
- // eslint-disable-next-line deprecation/deprecation
- supportsReferrerPolicy,
- supportsReportingObserver,
-} from './utils/supports';
-export { SyncPromise, rejectedSyncPromise, resolvedSyncPromise } from './utils/syncpromise';
-export { browserPerformanceTimeOrigin, dateTimestampInSeconds, timestampInSeconds } from './utils/time';
-export {
- TRACEPARENT_REGEXP,
- extractTraceparentData,
- generateSentryTraceHeader,
- propagationContextFromHeaders,
- shouldContinueTrace,
- generateTraceparentHeader,
-} from './utils/tracing';
-export { getSDKSource, isBrowserBundle } from './utils/env';
-export type { SdkSource } from './utils/env';
-export {
- addItemToEnvelope,
- createAttachmentEnvelopeItem,
- createEnvelope,
- createEventEnvelopeHeaders,
- createSpanEnvelopeItem,
- envelopeContainsItemType,
- envelopeItemTypeToDataCategory,
- forEachEnvelopeItem,
- getSdkMetadataForEnvelopeHeader,
- parseEnvelope,
- serializeEnvelope,
-} from './utils/envelope';
-export { createClientReportEnvelope } from './utils/clientreport';
-export {
- DEFAULT_RETRY_AFTER,
- disabledUntil,
- isRateLimited,
- parseRetryAfterHeader,
- updateRateLimits,
-} from './utils/ratelimit';
-export type { RateLimits } from './utils/ratelimit';
-export {
- MAX_BAGGAGE_STRING_LENGTH,
- SENTRY_BAGGAGE_KEY_PREFIX,
- SENTRY_BAGGAGE_KEY_PREFIX_REGEX,
- baggageHeaderToDynamicSamplingContext,
- dynamicSamplingContextToSentryBaggageHeader,
- parseBaggageHeader,
- objectToBaggageHeader,
- mergeBaggageHeaders,
-} from './utils/baggage';
-export {
- getSanitizedUrlString,
- parseUrl,
- stripUrlQueryAndFragment,
- parseStringToURLObject,
- getHttpSpanDetailsFromUrlObject,
- isURLObjectRelative,
- getSanitizedUrlStringFromUrlObject,
- stripDataUrlContent,
-} from './utils/url';
-export {
- eventFromMessage,
- eventFromUnknownInput,
- exceptionFromError,
- parseStackFrames,
- _enhanceErrorWithSentryInfo as _INTERNAL_enhanceErrorWithSentryInfo,
-} from './utils/eventbuilder';
-export { callFrameToStackFrame, watchdogTimer } from './utils/anr';
-export { LRUMap } from './utils/lru';
-export { generateTraceId, generateSpanId } from './utils/propagationContext';
-export { vercelWaitUntil } from './utils/vercelWaitUntil';
-export { flushIfServerless } from './utils/flushIfServerless';
-export { SDK_VERSION } from './utils/version';
-export { getDebugImagesForResources, getFilenameToDebugIdMap } from './utils/debug-ids';
-export { getFilenameToMetadataMap } from './metadata';
-export { escapeStringForRegex } from './vendor/escapeStringForRegex';
-
-export type { Attachment } from './types-hoist/attachment';
-export type {
- Breadcrumb,
- BreadcrumbHint,
- FetchBreadcrumbData,
- XhrBreadcrumbData,
- FetchBreadcrumbHint,
- XhrBreadcrumbHint,
-} from './types-hoist/breadcrumb';
-export type { ClientReport, Outcome, EventDropReason } from './types-hoist/clientreport';
-export type {
- Context,
- Contexts,
- DeviceContext,
- OsContext,
- AppContext,
- CultureContext,
- TraceContext,
- CloudResourceContext,
- MissingInstrumentationContext,
-} from './types-hoist/context';
-export type { DataCategory } from './types-hoist/datacategory';
-export type { DsnComponents, DsnLike, DsnProtocol } from './types-hoist/dsn';
-export type { DebugImage, DebugMeta } from './types-hoist/debugMeta';
-export type {
- AttachmentItem,
- BaseEnvelopeHeaders,
- BaseEnvelopeItemHeaders,
- ClientReportEnvelope,
- ClientReportItem,
- DynamicSamplingContext,
- Envelope,
- EnvelopeItemType,
- EnvelopeItem,
- EventEnvelope,
- EventEnvelopeHeaders,
- EventItem,
- ReplayEnvelope,
- FeedbackItem,
- SessionEnvelope,
- SessionItem,
- UserFeedbackItem,
- CheckInItem,
- CheckInEnvelope,
- RawSecurityEnvelope,
- RawSecurityItem,
- ProfileItem,
- ProfileChunkEnvelope,
- ProfileChunkItem,
- SpanEnvelope,
- StreamedSpanEnvelope,
- SpanItem,
- LogEnvelope,
- MetricEnvelope,
-} from './types-hoist/envelope';
-export type { ExtendedError } from './types-hoist/error';
-export type { Event, EventHint, EventType, ErrorEvent, TransactionEvent } from './types-hoist/event';
-export type { EventProcessor } from './types-hoist/eventprocessor';
-export type { Exception } from './types-hoist/exception';
-export type { Extra, Extras } from './types-hoist/extra';
-export type { Integration, IntegrationFn } from './types-hoist/integration';
-export type { Mechanism } from './types-hoist/mechanism';
-export type { ExtractedNodeRequestData, HttpHeaderValue, Primitive, WorkerLocation } from './types-hoist/misc';
-export type { ClientOptions, CoreOptions as Options, ServerRuntimeOptions } from './types-hoist/options';
-export type { Package } from './types-hoist/package';
-export type { PolymorphicEvent, PolymorphicRequest } from './types-hoist/polymorphics';
-export type {
- ThreadId,
- FrameId,
- StackId,
- ThreadCpuSample,
- ThreadCpuStack,
- ThreadCpuFrame,
- ThreadCpuProfile,
- ContinuousThreadCpuProfile,
- Profile,
- ProfileChunk,
-} from './types-hoist/profiling';
-export type {
- ReplayEndEvent,
- ReplayEvent,
- ReplayRecordingData,
- ReplayRecordingMode,
- ReplayStartEvent,
- ReplayStopReason,
-} from './types-hoist/replay';
-export type {
- FeedbackErrorCode,
- FeedbackErrorMessages,
- FeedbackEvent,
- FeedbackFormData,
- FeedbackInternalOptions,
- FeedbackModalIntegration,
- FeedbackScreenshotIntegration,
- SendFeedback,
- SendFeedbackParams,
- UserFeedback,
-} from './types-hoist/feedback';
-export type {
- QueryParams,
- RequestEventData,
- RequestHookInfo,
- ResponseHookInfo,
- SanitizedRequestData,
-} from './types-hoist/request';
-export type { Runtime } from './types-hoist/runtime';
-export type { SdkInfo } from './types-hoist/sdkinfo';
-export type { SdkMetadata } from './types-hoist/sdkmetadata';
-export type {
- SessionAggregates,
- AggregationCounts,
- Session,
- SessionContext,
- SessionStatus,
- SerializedSession,
-} from './types-hoist/session';
-export type { SeverityLevel } from './types-hoist/severity';
-export type {
- Span,
- SentrySpanArguments,
- SpanOrigin,
- SpanAttributeValue,
- SpanAttributes,
- SpanTimeInput,
- SpanJSON,
- SpanContextData,
- TraceFlag,
- SerializedStreamedSpan,
- SerializedStreamedSpanContainer,
- StreamedSpanJSON,
-} from './types-hoist/span';
-export type { SpanStatus } from './types-hoist/spanStatus';
-export type { Log, LogSeverityLevel } from './types-hoist/log';
-export type { SpanLink } from './types-hoist/link';
-export type {
- Metric,
- MetricType,
- SerializedMetric,
- SerializedMetricContainer,
- // eslint-disable-next-line deprecation/deprecation
- SerializedMetricAttributeValue,
-} from './types-hoist/metric';
-export type { TimedEvent } from './types-hoist/timedEvent';
-export type { StackFrame } from './types-hoist/stackframe';
-export type { Stacktrace, StackParser, StackLineParser, StackLineParserFn } from './types-hoist/stacktrace';
-export type { PropagationContext, TracePropagationTargets, SerializedTraceData } from './types-hoist/tracing';
-export type { StartSpanOptions } from './types-hoist/startSpanOptions';
-export type { TraceparentData, TransactionSource } from './types-hoist/transaction';
-export type {
- TracesSamplerSamplingContext,
- CustomSamplingContext,
- SamplingContext,
-} from './types-hoist/samplingcontext';
-export type {
- DurationUnit,
- InformationUnit,
- FractionUnit,
- MeasurementUnit,
- NoneUnit,
- Measurements,
-} from './types-hoist/measurement';
-export type { Thread } from './types-hoist/thread';
-export type {
- Transport,
- TransportRequest,
- TransportMakeRequestResponse,
- InternalBaseTransportOptions,
- BaseTransportOptions,
- TransportRequestExecutor,
-} from './types-hoist/transport';
-export type { User } from './types-hoist/user';
-export type { WebFetchHeaders, WebFetchRequest } from './types-hoist/webfetchapi';
-export type { WrappedFunction } from './types-hoist/wrappedfunction';
-export type {
- HandlerDataFetch,
- HandlerDataXhr,
- HandlerDataDom,
- HandlerDataConsole,
- HandlerDataHistory,
- HandlerDataError,
- HandlerDataUnhandledRejection,
- ConsoleLevel,
- SentryXhrData,
- SentryWrappedXMLHttpRequest,
-} from './types-hoist/instrument';
-export type { BrowserClientReplayOptions, BrowserClientProfilingOptions } from './types-hoist/browseroptions';
-export type {
- CheckIn,
- MonitorConfig,
- FinishedCheckIn,
- InProgressCheckIn,
- SerializedCheckIn,
-} from './types-hoist/checkin';
-export type { ParameterizedString } from './types-hoist/parameterize';
-export type { ContinuousProfiler, ProfilingIntegration, Profiler } from './types-hoist/profiling';
-export type { ViewHierarchyData, ViewHierarchyWindow } from './types-hoist/view-hierarchy';
-export type { LegacyCSPReport } from './types-hoist/csp';
-export type { SerializedLog, SerializedLogContainer } from './types-hoist/log';
-export type {
- BuildTimeOptionsBase,
- UnstableVitePluginOptions,
- UnstableRollupPluginOptions,
- UnstableWebpackPluginOptions,
-} from './build-time-plugins/buildTimeOptionsBase';
-export type { RandomSafeContextRunner as _INTERNAL_RandomSafeContextRunner } from './utils/randomSafeContext';
-export {
- withRandomSafeContext as _INTERNAL_withRandomSafeContext,
- safeMathRandom as _INTERNAL_safeMathRandom,
- safeDateNow as _INTERNAL_safeDateNow,
-} from './utils/randomSafeContext';
-export { safeUnref as _INTERNAL_safeUnref } from './utils/timer';
+export * from './shared-exports';
+export * from './server-exports';
+export * from './browser-exports';
diff --git a/packages/core/src/instrument/console.ts b/packages/core/src/instrument/console.ts
index ef7e9c804943..737e9fd907fa 100644
--- a/packages/core/src/instrument/console.ts
+++ b/packages/core/src/instrument/console.ts
@@ -1,10 +1,20 @@
/* eslint-disable @typescript-eslint/no-explicit-any */
/* eslint-disable @typescript-eslint/ban-types */
+import { DEBUG_BUILD } from '../debug-build';
import type { ConsoleLevel, HandlerDataConsole } from '../types-hoist/instrument';
import { CONSOLE_LEVELS, originalConsoleMethods } from '../utils/debug-logger';
import { fill } from '../utils/object';
+import { stringMatchesSomePattern } from '../utils/string';
import { GLOBAL_OBJ } from '../utils/worldwide';
import { addHandler, maybeInstrument, triggerHandlers } from './handlers';
+import { debug } from '../utils/debug-logger';
+
+/**
+ * Filter out console messages that match the given strings or regular expressions.
+ * These will neither be passed to the handler nor logged to the user, unless debug logging is enabled.
+ * This is a set to avoid duplicate integration setups to add the same filter multiple times.
+ */
+const _filter = new Set<string | RegExp>([]);
/**
* Add an instrumentation handler for when a console.xxx method is called.
@@ -20,6 +30,27 @@ export function addConsoleInstrumentationHandler(handler: (data: HandlerDataCons
return removeHandler;
}
+/**
+ * Add a filter to the console instrumentation to filter out console messages that match the given strings or regular expressions.
+ * Returns a function to remove the filter.
+ */
+export function addConsoleInstrumentationFilter(filter: (string | RegExp)[]): () => void {
+ for (const f of filter) {
+ _filter.add(f);
+ }
+
+ return () => {
+ for (const f of filter) {
+ _filter.delete(f);
+ }
+ };
+}
+
+/** Only exported for tests. */
+export function _INTERNAL_resetConsoleInstrumentationOptions(): void {
+ _filter.clear();
+}
+
function instrumentConsole(): void {
if (!('console' in GLOBAL_OBJ)) {
return;
@@ -34,10 +65,21 @@ function instrumentConsole(): void {
originalConsoleMethods[level] = originalConsoleMethod;
return function (...args: any[]): void {
- triggerHandlers('console', { args, level } as HandlerDataConsole);
-
+ const firstArg = args[0];
const log = originalConsoleMethods[level];
- log?.apply(GLOBAL_OBJ.console, args);
+
+ const isFiltered = _filter.size && typeof firstArg === 'string' && stringMatchesSomePattern(firstArg, _filter);
+
+ // Only trigger handlers for non-filtered messages
+ if (!isFiltered) {
+ triggerHandlers('console', { args, level } as HandlerDataConsole);
+ }
+
+ // Only log filtered messages in debug mode
+ if (!isFiltered || (DEBUG_BUILD && debug.isEnabled())) {
+ // Call original console method
+ log?.apply(GLOBAL_OBJ.console, args);
+ }
};
});
});
diff --git a/packages/core/src/integrations/console.ts b/packages/core/src/integrations/console.ts
index e39fd5ddcf0d..21e1ee53d328 100644
--- a/packages/core/src/integrations/console.ts
+++ b/packages/core/src/integrations/console.ts
@@ -1,6 +1,6 @@
import { addBreadcrumb } from '../breadcrumbs';
import { getClient } from '../currentScopes';
-import { addConsoleInstrumentationHandler } from '../instrument/console';
+import { addConsoleInstrumentationFilter, addConsoleInstrumentationHandler } from '../instrument/console';
import { defineIntegration } from '../integration';
import type { ConsoleLevel } from '../types-hoist/instrument';
import { CONSOLE_LEVELS } from '../utils/debug-logger';
@@ -10,6 +10,11 @@ import { GLOBAL_OBJ } from '../utils/worldwide';
interface ConsoleIntegrationOptions {
levels: ConsoleLevel[];
+ /**
+ * Filter out console messages that match the given strings or regular expressions.
+ * These will neither be passed to the handler nor logged to the user, unless debug logging is enabled.
+ */
+ filter?: (string | RegExp)[];
}
type GlobalObjectWithUtil = typeof GLOBAL_OBJ & {
@@ -48,8 +53,12 @@ export const consoleIntegration = defineIntegration((options: Partial;
+
+ // Tools: MCP SDK calls registeredTool.executor (generated from handler at registration time)
+ const registeredTools = server['_registeredTools'];
+ if (registeredTools && typeof registeredTools === 'object') {
+ for (const [name, tool] of Object.entries(registeredTools as Record<string, Record<string, unknown>>)) {
+ if (typeof tool['executor'] === 'function') {
+ tool['executor'] = createWrappedHandler(tool['executor'] as MCPHandler, 'registerTool', name);
+ }
+ }
+ }
+
+ // Resources: MCP SDK calls registeredResource.readCallback
+ const registeredResources = server['_registeredResources'];
+ if (registeredResources && typeof registeredResources === 'object') {
+ for (const [name, resource] of Object.entries(registeredResources as Record<string, Record<string, unknown>>)) {
+ if (typeof resource['readCallback'] === 'function') {
+ resource['readCallback'] = createWrappedHandler(
+ resource['readCallback'] as MCPHandler,
+ 'registerResource',
+ name,
+ );
+ }
+ }
+ }
+
+ // Resource templates: MCP SDK calls registeredResourceTemplate.readCallback
+ const registeredResourceTemplates = server['_registeredResourceTemplates'];
+ if (registeredResourceTemplates && typeof registeredResourceTemplates === 'object') {
+ for (const [name, template] of Object.entries(
+ registeredResourceTemplates as Record<string, Record<string, unknown>>,
+ )) {
+ if (typeof template['readCallback'] === 'function') {
+ template['readCallback'] = createWrappedHandler(
+ template['readCallback'] as MCPHandler,
+ 'registerResource',
+ name,
+ );
+ }
+ }
+ }
+
+ // Prompts: MCP SDK calls registeredPrompt.handler
+ const registeredPrompts = server['_registeredPrompts'];
+ if (registeredPrompts && typeof registeredPrompts === 'object') {
+ for (const [name, prompt] of Object.entries(registeredPrompts as Record<string, Record<string, unknown>>)) {
+ if (typeof prompt['handler'] === 'function') {
+ prompt['handler'] = createWrappedHandler(prompt['handler'] as MCPHandler, 'registerPrompt', name);
+ }
+ }
+ }
+}
diff --git a/packages/core/src/integrations/mcp-server/index.ts b/packages/core/src/integrations/mcp-server/index.ts
index b4ef87f0fa0a..952e780c0a9f 100644
--- a/packages/core/src/integrations/mcp-server/index.ts
+++ b/packages/core/src/integrations/mcp-server/index.ts
@@ -1,6 +1,6 @@
import { getClient } from '../../currentScopes';
import { fill } from '../../utils/object';
-import { wrapAllMCPHandlers } from './handlers';
+import { wrapAllMCPHandlers, wrapExistingHandlers } from './handlers';
import { wrapTransportError, wrapTransportOnClose, wrapTransportOnMessage, wrapTransportSend } from './transport';
import type { MCPServerInstance, McpServerWrapperOptions, MCPTransport, ResolvedMcpOptions } from './types';
import { validateMcpServerInstance } from './validation';
@@ -18,17 +18,24 @@ const wrappedMcpServerInstances = new WeakSet();
* and versions that expose the newer `registerTool`/`registerResource`/`registerPrompt` API (introduced in 1.x, sole API in 2.x).
* Automatically instruments transport methods and handler functions for comprehensive monitoring.
*
+ * Both call orderings are supported: wrapping before or after registering tools, resources,
+ * and prompts. Sentry patches the registration methods for future handlers and retroactively
+ * wraps any already-registered ones. Wrapping at construction time is recommended by
+ * convention (consistent with other SDK integrations), but is not required.
+ *
* @example
* ```typescript
* import * as Sentry from '@sentry/core';
* import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
* import { StreamableHTTPServerTransport } from '@modelcontextprotocol/sdk/server/streamableHttp.js';
*
- * // Default: inputs/outputs captured based on sendDefaultPii option
+ * // Wrap first, then register tools — this is the correct order
* const server = Sentry.wrapMcpServerWithSentry(
* new McpServer({ name: "my-server", version: "1.0.0" })
* );
*
+ * server.registerTool('my-tool', schema, handler);
+ *
* // Explicitly control input/output capture
* const server = Sentry.wrapMcpServerWithSentry(
* new McpServer({ name: "my-server", version: "1.0.0" }),
@@ -80,6 +87,8 @@ export function wrapMcpServerWithSentry(mcpServerInstance: S,
wrapAllMCPHandlers(serverInstance);
+ wrapExistingHandlers(serverInstance);
+
wrappedMcpServerInstances.add(mcpServerInstance);
return mcpServerInstance;
}
diff --git a/packages/core/src/server-exports.ts b/packages/core/src/server-exports.ts
new file mode 100644
index 000000000000..f5ad45486cb5
--- /dev/null
+++ b/packages/core/src/server-exports.ts
@@ -0,0 +1,43 @@
+/**
+ * Server-only utilities for Sentry SDKs.
+ *
+ * @module
+ */
+
+export type { ServerRuntimeClientOptions } from './server-runtime-client';
+export { ServerRuntimeClient } from './server-runtime-client';
+export type { ServerRuntimeOptions } from './types-hoist/options';
+export { trpcMiddleware } from './trpc';
+export { wrapMcpServerWithSentry } from './integrations/mcp-server';
+export { isNodeEnv, loadModule } from './utils/node';
+export { filenameIsInApp, node, nodeStackLineParser } from './utils/node-stack-trace';
+export { vercelWaitUntil } from './utils/vercelWaitUntil';
+export { flushIfServerless } from './utils/flushIfServerless';
+export { callFrameToStackFrame, watchdogTimer } from './utils/anr';
+export { safeUnref as _INTERNAL_safeUnref } from './utils/timer';
+export { patchExpressModule, setupExpressErrorHandler, expressErrorHandler } from './integrations/express/index';
+export type {
+ ExpressIntegrationOptions,
+ ExpressHandlerOptions,
+ ExpressMiddleware,
+ ExpressErrorMiddleware,
+} from './integrations/express/types';
+export { instrumentPostgresJsSql } from './integrations/postgresjs';
+
+export { patchHttpModuleClient } from './integrations/http/client-patch';
+export { getHttpClientSubscriptions } from './integrations/http/client-subscriptions';
+export { addOutgoingRequestBreadcrumb } from './integrations/http/add-outgoing-request-breadcrumb';
+export {
+ getRequestUrl,
+ getRequestUrlObject,
+ getRequestUrlFromClientRequest,
+ getRequestOptions,
+} from './integrations/http/get-request-url';
+export { HTTP_ON_CLIENT_REQUEST, HTTP_ON_SERVER_REQUEST } from './integrations/http/constants';
+export type {
+ HttpInstrumentationOptions,
+ HttpClientRequest,
+ HttpIncomingMessage,
+ HttpServerResponse,
+ HttpModuleExport,
+} from './integrations/http/types';
diff --git a/packages/core/src/server.ts b/packages/core/src/server.ts
new file mode 100644
index 000000000000..10c4a02625bf
--- /dev/null
+++ b/packages/core/src/server.ts
@@ -0,0 +1,11 @@
+/**
+ * The Sentry core SDK and integrations used by node, node-core, cloudflare,
+ * bun, deno, aws lambda, and other server-side platforms, where bundle size
+ * is less of an issue.
+ *
+ * This export should not contain anything strictly browser-specific.
+ */
+/* eslint-disable max-lines */
+
+export * from './shared-exports';
+export * from './server-exports';
diff --git a/packages/core/src/shared-exports.ts b/packages/core/src/shared-exports.ts
new file mode 100644
index 000000000000..dcc8c268f509
--- /dev/null
+++ b/packages/core/src/shared-exports.ts
@@ -0,0 +1,545 @@
+/**
+ * Utilities shared between server and browser SDKs.
+ */
+/* eslint-disable max-lines */
+
+export type { ClientClass as SentryCoreCurrentScopes } from './sdk';
+export type { AsyncContextStrategy } from './asyncContext/types';
+export type { Carrier } from './carrier';
+export type { OfflineStore, OfflineTransportOptions } from './transports/offline';
+export type { IntegrationIndex } from './integration';
+export * from './tracing';
+export * from './semanticAttributes';
+export { createEventEnvelope, createSessionEnvelope, createSpanEnvelope } from './envelope';
+export {
+ captureCheckIn,
+ withMonitor,
+ captureException,
+ captureEvent,
+ captureMessage,
+ lastEventId,
+ close,
+ flush,
+ setContext,
+ setExtra,
+ setExtras,
+ setTag,
+ setTags,
+ setUser,
+ setConversationId,
+ isInitialized,
+ isEnabled,
+ startSession,
+ endSession,
+ captureSession,
+ addEventProcessor,
+} from './exports';
+export {
+ getCurrentScope,
+ getIsolationScope,
+ getGlobalScope,
+ withScope,
+ withIsolationScope,
+ getClient,
+ getTraceContextFromScope,
+ registerExternalPropagationContext,
+ getExternalPropagationContext,
+ hasExternalPropagationContext,
+} from './currentScopes';
+export { getDefaultCurrentScope, getDefaultIsolationScope } from './defaultScopes';
+export { setAsyncContextStrategy } from './asyncContext';
+export { getGlobalSingleton, getMainCarrier } from './carrier';
+export { makeSession, closeSession, updateSession } from './session';
+export { Scope } from './scope';
+export type { CaptureContext, ScopeContext, ScopeData } from './scope';
+export { notifyEventProcessors } from './eventProcessors';
+export { getEnvelopeEndpointWithUrlEncodedAuth, getReportDialogEndpoint, SENTRY_API_VERSION } from './api';
+export { Client } from './client';
+export { initAndBind, setCurrentClient } from './sdk';
+export { createTransport } from './transports/base';
+export { makeOfflineTransport } from './transports/offline';
+export { makeMultiplexedTransport, MULTIPLEXED_TRANSPORT_EXTRA_KEY } from './transports/multiplexed';
+export { getIntegrationsToSetup, addIntegration, defineIntegration, installedIntegrations } from './integration';
+export {
+ _INTERNAL_skipAiProviderWrapping,
+ _INTERNAL_shouldSkipAiProviderWrapping,
+ _INTERNAL_clearAiProviderSkips,
+} from './utils/ai/providerSkip';
+export { envToBool } from './utils/envToBool';
+export { applyScopeDataToEvent, mergeScopeData, getCombinedScopeData } from './utils/scopeData';
+export { prepareEvent } from './utils/prepareEvent';
+export type { ExclusiveEventHintOrCaptureContext } from './utils/prepareEvent';
+export { createCheckInEnvelope } from './checkin';
+export { hasSpansEnabled } from './utils/hasSpansEnabled';
+export { withStreamedSpan } from './tracing/spans/beforeSendSpan';
+export { isStreamedBeforeSendSpanCallback } from './tracing/spans/beforeSendSpan';
+export { safeSetSpanJSONAttributes } from './tracing/spans/captureSpan';
+export { isSentryRequestUrl } from './utils/isSentryRequestUrl';
+export { handleCallbackErrors } from './utils/handleCallbackErrors';
+export { parameterize, fmt } from './utils/parameterize';
+export type { HandleTunnelRequestOptions } from './utils/tunnel';
+export { handleTunnelRequest } from './utils/tunnel';
+export { addAutoIpAddressToSession } from './utils/ipAddress';
+// eslint-disable-next-line deprecation/deprecation
+export { addAutoIpAddressToUser } from './utils/ipAddress';
+export {
+ convertSpanLinksForEnvelope,
+ spanToTraceHeader,
+ spanToJSON,
+ spanToStreamedSpanJSON,
+ spanIsSampled,
+ spanToTraceContext,
+ getSpanDescendants,
+ getStatusMessage,
+ getRootSpan,
+ INTERNAL_getSegmentSpan,
+ getActiveSpan,
+ addChildSpanToSpan,
+ spanTimeInputToSeconds,
+ updateSpanName,
+} from './utils/spanUtils';
+export { _setSpanForScope as _INTERNAL_setSpanForScope } from './utils/spanOnScope';
+export { parseSampleRate } from './utils/parseSampleRate';
+export { applySdkMetadata } from './utils/sdkMetadata';
+export { getTraceData } from './utils/traceData';
+export { shouldPropagateTraceForUrl } from './utils/tracePropagationTargets';
+export { getTraceMetaTags } from './utils/meta';
+export { debounce } from './utils/debounce';
+export { makeWeakRef, derefWeakRef } from './utils/weakRef';
+export type { MaybeWeakRef } from './utils/weakRef';
+export { shouldIgnoreSpan } from './utils/should-ignore-span';
+export {
+ winterCGHeadersToDict,
+ winterCGRequestToRequestData,
+ captureBodyFromWinterCGRequest,
+ httpRequestToRequestData,
+ extractQueryParamsFromUrl,
+ headersToDict,
+ httpHeadersToSpanAttributes,
+ getMaxBodyByteLength,
+ MAX_BODY_BYTE_LENGTH,
+} from './utils/request';
+export type { MaxRequestBodySize } from './utils/request';
+export { DEFAULT_ENVIRONMENT, DEV_ENVIRONMENT } from './constants';
+export { addBreadcrumb } from './breadcrumbs';
+export { functionToStringIntegration } from './integrations/functiontostring';
+// eslint-disable-next-line deprecation/deprecation
+export { inboundFiltersIntegration } from './integrations/eventFilters';
+export { eventFiltersIntegration } from './integrations/eventFilters';
+export { linkedErrorsIntegration } from './integrations/linkederrors';
+export { moduleMetadataIntegration } from './integrations/moduleMetadata';
+export { requestDataIntegration } from './integrations/requestdata';
+export { captureConsoleIntegration } from './integrations/captureconsole';
+export { dedupeIntegration } from './integrations/dedupe';
+export { extraErrorDataIntegration } from './integrations/extraerrordata';
+export { rewriteFramesIntegration } from './integrations/rewriteframes';
+export { supabaseIntegration, instrumentSupabaseClient } from './integrations/supabase';
+export { zodErrorsIntegration } from './integrations/zoderrors';
+export { thirdPartyErrorFilterIntegration } from './integrations/third-party-errors-filter';
+export { consoleIntegration } from './integrations/console';
+export type { FeatureFlagsIntegration } from './integrations/featureFlags';
+export { featureFlagsIntegration } from './integrations/featureFlags';
+export { growthbookIntegration } from './integrations/featureFlags';
+export { conversationIdIntegration } from './integrations/conversationId';
+export { profiler } from './profiling';
+// eslint thinks the entire function is deprecated (while only one overload is actually deprecated)
+// Therefore:
+// eslint-disable-next-line deprecation/deprecation
+export { instrumentFetchRequest, _INTERNAL_getTracingHeadersForFetchRequest } from './fetch';
+export { captureFeedback } from './feedback';
+export type { ReportDialogOptions } from './report-dialog';
+export { _INTERNAL_captureLog, _INTERNAL_flushLogsBuffer, _INTERNAL_captureSerializedLog } from './logs/internal';
+export * as logger from './logs/public-api';
+export { consoleLoggingIntegration } from './logs/console-integration';
+export {
+ _INTERNAL_captureMetric,
+ _INTERNAL_flushMetricsBuffer,
+ _INTERNAL_captureSerializedMetric,
+} from './metrics/internal';
+export * as metrics from './metrics/public-api';
+export type { MetricOptions } from './metrics/public-api';
+export { createConsolaReporter } from './integrations/consola';
+export { addVercelAiProcessors } from './tracing/vercel-ai';
+export { _INTERNAL_getSpanContextForToolCallId, _INTERNAL_cleanupToolCallSpanContext } from './tracing/vercel-ai/utils';
+export { toolCallSpanContextMap as _INTERNAL_toolCallSpanContextMap } from './tracing/vercel-ai/constants';
+export { instrumentOpenAiClient } from './tracing/openai';
+export { OPENAI_INTEGRATION_NAME } from './tracing/openai/constants';
+export { instrumentAnthropicAiClient } from './tracing/anthropic-ai';
+export { ANTHROPIC_AI_INTEGRATION_NAME } from './tracing/anthropic-ai/constants';
+export { instrumentGoogleGenAIClient } from './tracing/google-genai';
+export { GOOGLE_GENAI_INTEGRATION_NAME } from './tracing/google-genai/constants';
+export type { GoogleGenAIResponse } from './tracing/google-genai/types';
+export { createLangChainCallbackHandler, instrumentLangChainEmbeddings } from './tracing/langchain';
+export { LANGCHAIN_INTEGRATION_NAME } from './tracing/langchain/constants';
+export type { LangChainOptions, LangChainIntegration } from './tracing/langchain/types';
+export { instrumentStateGraphCompile, instrumentCreateReactAgent, instrumentLangGraph } from './tracing/langgraph';
+export { LANGGRAPH_INTEGRATION_NAME } from './tracing/langgraph/constants';
+export type { LangGraphOptions, LangGraphIntegration, CompiledGraph } from './tracing/langgraph/types';
+export type { OpenAiClient, OpenAiOptions, InstrumentedMethod } from './tracing/openai/types';
+export type {
+ AnthropicAiClient,
+ AnthropicAiOptions,
+ AnthropicAiInstrumentedMethod,
+ AnthropicAiResponse,
+} from './tracing/anthropic-ai/types';
+export type {
+ GoogleGenAIClient,
+ GoogleGenAIChat,
+ GoogleGenAIOptions,
+ GoogleGenAIInstrumentedMethod,
+} from './tracing/google-genai/types';
+// eslint-disable-next-line deprecation/deprecation
+export type { GoogleGenAIIstrumentedMethod } from './tracing/google-genai/types';
+export { SpanBuffer } from './tracing/spans/spanBuffer';
+export { hasSpanStreamingEnabled } from './tracing/spans/hasSpanStreamingEnabled';
+export { spanStreamingIntegration } from './integrations/spanStreaming';
+export type { FeatureFlag } from './utils/featureFlags';
+export {
+ _INTERNAL_copyFlagsFromScopeToEvent,
+ _INTERNAL_insertFlagToScope,
+ _INTERNAL_addFeatureFlagToActiveSpan,
+ _INTERNAL_FLAG_BUFFER_SIZE,
+ _INTERNAL_MAX_FLAGS_PER_SPAN,
+} from './utils/featureFlags';
+export { applyAggregateErrorsToEvent } from './utils/aggregate-errors';
+export { getBreadcrumbLogLevelFromHttpStatusCode } from './utils/breadcrumb-log-level';
+export { dsnFromString, dsnToString, makeDsn } from './utils/dsn';
+// eslint-disable-next-line deprecation/deprecation
+export { SentryError } from './utils/error';
+export { GLOBAL_OBJ } from './utils/worldwide';
+export type { InternalGlobal } from './utils/worldwide';
+export { addConsoleInstrumentationHandler, addConsoleInstrumentationFilter } from './instrument/console';
+export { addFetchEndInstrumentationHandler, addFetchInstrumentationHandler } from './instrument/fetch';
+export { addGlobalErrorInstrumentationHandler } from './instrument/globalError';
+export { addGlobalUnhandledRejectionInstrumentationHandler } from './instrument/globalUnhandledRejection';
+export { addHandler, maybeInstrument, resetInstrumentationHandlers, triggerHandlers } from './instrument/handlers';
+export {
+ isDOMError,
+ isDOMException,
+ isElement,
+ isError,
+ isErrorEvent,
+ isEvent,
+ isInstanceOf,
+ isParameterizedString,
+ isPlainObject,
+ isPrimitive,
+ isRegExp,
+ isString,
+ isSyntheticEvent,
+ isThenable,
+ isVueViewModel,
+} from './utils/is';
+export { isBrowser } from './utils/isBrowser';
+export { CONSOLE_LEVELS, consoleSandbox, debug, originalConsoleMethods } from './utils/debug-logger';
+export type { SentryDebugLogger } from './utils/debug-logger';
+export {
+ addContextToFrame,
+ addExceptionMechanism,
+ addExceptionTypeValue,
+ checkOrSetAlreadyCaught,
+ isAlreadyCaptured,
+ getEventDescription,
+ parseSemver,
+ uuid4,
+} from './utils/misc';
+export { normalize, normalizeToSize, normalizeUrlToBase } from './utils/normalize';
+export { setNormalizationDepthOverrideHint, setSkipNormalizationHint } from './utils/normalizationHints';
+export {
+ addNonEnumerableProperty,
+ convertToPlainObject,
+ // eslint-disable-next-line deprecation/deprecation
+ dropUndefinedKeys,
+ extractExceptionKeysForMessage,
+ fill,
+ getOriginalFunction,
+ markFunctionWrapped,
+ objectify,
+} from './utils/object';
+export { basename, dirname, isAbsolute, join, normalizePath, relative, resolve } from './utils/path';
+export { makePromiseBuffer, SENTRY_BUFFER_FULL_ERROR } from './utils/promisebuffer';
+export type { PromiseBuffer } from './utils/promisebuffer';
+export { severityLevelFromString } from './utils/severity';
+export { replaceExports } from './utils/exports';
+export {
+ UNKNOWN_FUNCTION,
+ createStackParser,
+ getFramesFromEvent,
+ getFunctionName,
+ stackParserFromStackParserOptions,
+ stripSentryFramesAndReverse,
+} from './utils/stacktrace';
+export { isMatchingPattern, safeJoin, snipLine, stringMatchesSomePattern, truncate } from './utils/string';
+export {
+ isNativeFunction,
+ supportsDOMException,
+ supportsErrorEvent,
+ // eslint-disable-next-line deprecation/deprecation
+ supportsFetch,
+ // eslint-disable-next-line deprecation/deprecation
+ supportsReferrerPolicy,
+} from './utils/supports';
+export { SyncPromise, rejectedSyncPromise, resolvedSyncPromise } from './utils/syncpromise';
+export { browserPerformanceTimeOrigin, dateTimestampInSeconds, timestampInSeconds } from './utils/time';
+export {
+ TRACEPARENT_REGEXP,
+ extractTraceparentData,
+ generateSentryTraceHeader,
+ propagationContextFromHeaders,
+ shouldContinueTrace,
+ generateTraceparentHeader,
+} from './utils/tracing';
+export { getSDKSource, isBrowserBundle } from './utils/env';
+export type { SdkSource } from './utils/env';
+export {
+ addItemToEnvelope,
+ createAttachmentEnvelopeItem,
+ createEnvelope,
+ createEventEnvelopeHeaders,
+ createSpanEnvelopeItem,
+ envelopeContainsItemType,
+ envelopeItemTypeToDataCategory,
+ forEachEnvelopeItem,
+ getSdkMetadataForEnvelopeHeader,
+ parseEnvelope,
+ serializeEnvelope,
+} from './utils/envelope';
+export { createClientReportEnvelope } from './utils/clientreport';
+export {
+ DEFAULT_RETRY_AFTER,
+ disabledUntil,
+ isRateLimited,
+ parseRetryAfterHeader,
+ updateRateLimits,
+} from './utils/ratelimit';
+export type { RateLimits } from './utils/ratelimit';
+export {
+ MAX_BAGGAGE_STRING_LENGTH,
+ SENTRY_BAGGAGE_KEY_PREFIX,
+ SENTRY_BAGGAGE_KEY_PREFIX_REGEX,
+ baggageHeaderToDynamicSamplingContext,
+ dynamicSamplingContextToSentryBaggageHeader,
+ parseBaggageHeader,
+ objectToBaggageHeader,
+ mergeBaggageHeaders,
+} from './utils/baggage';
+export {
+ getSanitizedUrlString,
+ parseUrl,
+ stripUrlQueryAndFragment,
+ parseStringToURLObject,
+ getHttpSpanDetailsFromUrlObject,
+ isURLObjectRelative,
+ getSanitizedUrlStringFromUrlObject,
+ stripDataUrlContent,
+} from './utils/url';
+export {
+ eventFromMessage,
+ eventFromUnknownInput,
+ exceptionFromError,
+ parseStackFrames,
+ _enhanceErrorWithSentryInfo as _INTERNAL_enhanceErrorWithSentryInfo,
+} from './utils/eventbuilder';
+export { LRUMap } from './utils/lru';
+export { generateTraceId, generateSpanId } from './utils/propagationContext';
+export { SDK_VERSION } from './utils/version';
+export { getDebugImagesForResources, getFilenameToDebugIdMap } from './utils/debug-ids';
+export { getFilenameToMetadataMap } from './metadata';
+export { escapeStringForRegex } from './vendor/escapeStringForRegex';
+export type { Attachment } from './types-hoist/attachment';
+export type { Breadcrumb, BreadcrumbHint, FetchBreadcrumbData, FetchBreadcrumbHint } from './types-hoist/breadcrumb';
+export type { ClientReport, Outcome, EventDropReason } from './types-hoist/clientreport';
+export type {
+ Context,
+ Contexts,
+ DeviceContext,
+ OsContext,
+ AppContext,
+ CultureContext,
+ TraceContext,
+ CloudResourceContext,
+ MissingInstrumentationContext,
+} from './types-hoist/context';
+export type { DataCategory } from './types-hoist/datacategory';
+export type { DsnComponents, DsnLike, DsnProtocol } from './types-hoist/dsn';
+export type { DebugImage, DebugMeta } from './types-hoist/debugMeta';
+export type {
+ AttachmentItem,
+ BaseEnvelopeHeaders,
+ BaseEnvelopeItemHeaders,
+ ClientReportEnvelope,
+ ClientReportItem,
+ DynamicSamplingContext,
+ Envelope,
+ EnvelopeItemType,
+ EnvelopeItem,
+ EventEnvelope,
+ EventEnvelopeHeaders,
+ EventItem,
+ ReplayEnvelope,
+ FeedbackItem,
+ SessionEnvelope,
+ SessionItem,
+ UserFeedbackItem,
+ CheckInItem,
+ CheckInEnvelope,
+ RawSecurityEnvelope,
+ RawSecurityItem,
+ ProfileItem,
+ ProfileChunkEnvelope,
+ ProfileChunkItem,
+ SpanEnvelope,
+ StreamedSpanEnvelope,
+ SpanItem,
+ LogEnvelope,
+ MetricEnvelope,
+} from './types-hoist/envelope';
+export type { ExtendedError } from './types-hoist/error';
+export type { Event, EventHint, EventType, ErrorEvent, TransactionEvent } from './types-hoist/event';
+export type { EventProcessor } from './types-hoist/eventprocessor';
+export type { Exception } from './types-hoist/exception';
+export type { Extra, Extras } from './types-hoist/extra';
+export type { Integration, IntegrationFn } from './types-hoist/integration';
+export type { Mechanism } from './types-hoist/mechanism';
+export type { ExtractedNodeRequestData, HttpHeaderValue, Primitive, WorkerLocation } from './types-hoist/misc';
+export type { ClientOptions, CoreOptions as Options } from './types-hoist/options';
+export type { Package } from './types-hoist/package';
+export type { PolymorphicEvent, PolymorphicRequest } from './types-hoist/polymorphics';
+export type {
+ ThreadId,
+ FrameId,
+ StackId,
+ ThreadCpuSample,
+ ThreadCpuStack,
+ ThreadCpuFrame,
+ ThreadCpuProfile,
+ ContinuousThreadCpuProfile,
+ Profile,
+ ProfileChunk,
+} from './types-hoist/profiling';
+export type {
+ ReplayEndEvent,
+ ReplayEvent,
+ ReplayRecordingData,
+ ReplayRecordingMode,
+ ReplayStartEvent,
+ ReplayStopReason,
+} from './types-hoist/replay';
+export type {
+ FeedbackErrorCode,
+ FeedbackErrorMessages,
+ FeedbackEvent,
+ FeedbackFormData,
+ FeedbackInternalOptions,
+ FeedbackModalIntegration,
+ FeedbackScreenshotIntegration,
+ SendFeedback,
+ SendFeedbackParams,
+ UserFeedback,
+} from './types-hoist/feedback';
+export type {
+ QueryParams,
+ RequestEventData,
+ RequestHookInfo,
+ ResponseHookInfo,
+ SanitizedRequestData,
+} from './types-hoist/request';
+export type { Runtime } from './types-hoist/runtime';
+export type { SdkInfo } from './types-hoist/sdkinfo';
+export type { SdkMetadata } from './types-hoist/sdkmetadata';
+export type {
+ SessionAggregates,
+ AggregationCounts,
+ Session,
+ SessionContext,
+ SessionStatus,
+ SerializedSession,
+} from './types-hoist/session';
+export type { SeverityLevel } from './types-hoist/severity';
+export type {
+ Span,
+ SentrySpanArguments,
+ SpanOrigin,
+ SpanAttributeValue,
+ SpanAttributes,
+ SpanTimeInput,
+ SpanJSON,
+ SpanContextData,
+ TraceFlag,
+ SerializedStreamedSpan,
+ SerializedStreamedSpanContainer,
+ StreamedSpanJSON,
+} from './types-hoist/span';
+export type { SpanStatus } from './types-hoist/spanStatus';
+export type { Log, LogSeverityLevel } from './types-hoist/log';
+export type { SpanLink } from './types-hoist/link';
+export type {
+ Metric,
+ MetricType,
+ SerializedMetric,
+ SerializedMetricContainer,
+ // eslint-disable-next-line deprecation/deprecation
+ SerializedMetricAttributeValue,
+} from './types-hoist/metric';
+export type { TimedEvent } from './types-hoist/timedEvent';
+export type { StackFrame } from './types-hoist/stackframe';
+export type { Stacktrace, StackParser, StackLineParser, StackLineParserFn } from './types-hoist/stacktrace';
+export type { PropagationContext, TracePropagationTargets, SerializedTraceData } from './types-hoist/tracing';
+export type { StartSpanOptions } from './types-hoist/startSpanOptions';
+export type { TraceparentData, TransactionSource } from './types-hoist/transaction';
+export type {
+ TracesSamplerSamplingContext,
+ CustomSamplingContext,
+ SamplingContext,
+} from './types-hoist/samplingcontext';
+export type {
+ DurationUnit,
+ InformationUnit,
+ FractionUnit,
+ MeasurementUnit,
+ NoneUnit,
+ Measurements,
+} from './types-hoist/measurement';
+export type { Thread } from './types-hoist/thread';
+export type {
+ Transport,
+ TransportRequest,
+ TransportMakeRequestResponse,
+ InternalBaseTransportOptions,
+ BaseTransportOptions,
+ TransportRequestExecutor,
+} from './types-hoist/transport';
+export type { User } from './types-hoist/user';
+export type { WebFetchHeaders, WebFetchRequest } from './types-hoist/webfetchapi';
+export type { WrappedFunction } from './types-hoist/wrappedfunction';
+export type {
+ HandlerDataFetch,
+ HandlerDataConsole,
+ HandlerDataError,
+ HandlerDataUnhandledRejection,
+ ConsoleLevel,
+} from './types-hoist/instrument';
+export type {
+ CheckIn,
+ MonitorConfig,
+ FinishedCheckIn,
+ InProgressCheckIn,
+ SerializedCheckIn,
+} from './types-hoist/checkin';
+export type { ParameterizedString } from './types-hoist/parameterize';
+export type { ContinuousProfiler, ProfilingIntegration, Profiler } from './types-hoist/profiling';
+export type { ViewHierarchyData, ViewHierarchyWindow } from './types-hoist/view-hierarchy';
+export type { LegacyCSPReport } from './types-hoist/csp';
+export type { SerializedLog, SerializedLogContainer } from './types-hoist/log';
+export type {
+ BuildTimeOptionsBase,
+ UnstableVitePluginOptions,
+ UnstableRollupPluginOptions,
+ UnstableWebpackPluginOptions,
+} from './build-time-plugins/buildTimeOptionsBase';
+export type { RandomSafeContextRunner as _INTERNAL_RandomSafeContextRunner } from './utils/randomSafeContext';
+export {
+ withRandomSafeContext as _INTERNAL_withRandomSafeContext,
+ safeMathRandom as _INTERNAL_safeMathRandom,
+ safeDateNow as _INTERNAL_safeDateNow,
+} from './utils/randomSafeContext';
diff --git a/packages/core/src/tracing/sentrySpan.ts b/packages/core/src/tracing/sentrySpan.ts
index d9ab115b94cd..92c4617f29c2 100644
--- a/packages/core/src/tracing/sentrySpan.ts
+++ b/packages/core/src/tracing/sentrySpan.ts
@@ -392,8 +392,12 @@ export class SentrySpan implements Span {
// remove internal root span attributes we don't need to send.
/* eslint-disable @typescript-eslint/no-dynamic-delete */
delete this._attributes[SEMANTIC_ATTRIBUTE_SENTRY_CUSTOM_SPAN_NAME];
+ let hasGenAiSpans = false;
spans.forEach(span => {
delete span.data[SEMANTIC_ATTRIBUTE_SENTRY_CUSTOM_SPAN_NAME];
+ if (span.op?.startsWith('gen_ai.')) {
+ hasGenAiSpans = true;
+ }
});
// eslint-enabled-next-line @typescript-eslint/no-dynamic-delete
@@ -415,6 +419,7 @@ export class SentrySpan implements Span {
capturedSpanScope,
capturedSpanIsolationScope,
dynamicSamplingContext: getDynamicSamplingContextFromSpan(this),
+ hasGenAiSpans,
},
request: normalizedRequest,
...(source && {
diff --git a/packages/core/src/tracing/spans/extractGenAiSpans.ts b/packages/core/src/tracing/spans/extractGenAiSpans.ts
new file mode 100644
index 000000000000..42c47804728a
--- /dev/null
+++ b/packages/core/src/tracing/spans/extractGenAiSpans.ts
@@ -0,0 +1,58 @@
+import type { Client } from '../../client';
+import type { SpanContainerItem } from '../../types-hoist/envelope';
+import type { Event } from '../../types-hoist/event';
+import { isBrowser } from '../../utils/isBrowser';
+import { hasSpanStreamingEnabled } from './hasSpanStreamingEnabled';
+import { spanJsonToSerializedStreamedSpan } from './spanJsonToStreamedSpan';
+
+/**
+ * Extracts gen_ai spans from a transaction event, converts them to span v2 format,
+ * and returns them as a SpanContainerItem.
+ *
+ * Only applies to static mode (non-streaming) transactions.
+ *
+ * WARNING: This function mutates `event.spans` by removing the extracted gen_ai spans
+ * from the array. Call this before creating the event envelope so the transaction
+ * item does not include the extracted spans.
+ */
+export function extractGenAiSpansFromEvent(event: Event, client: Client): SpanContainerItem | undefined {
+ if (
+ event.type !== 'transaction' ||
+ !event.spans?.length ||
+ !event.sdkProcessingMetadata?.hasGenAiSpans ||
+ !client.getOptions().streamGenAiSpans ||
+ hasSpanStreamingEnabled(client)
+ ) {
+ return undefined;
+ }
+
+ const genAiSpans = [];
+ const remainingSpans = [];
+
+ for (const span of event.spans) {
+ if (span.op?.startsWith('gen_ai.')) {
+ genAiSpans.push(spanJsonToSerializedStreamedSpan(span));
+ } else {
+ remainingSpans.push(span);
+ }
+ }
+
+ if (genAiSpans.length === 0) {
+ return undefined;
+ }
+
+ event.spans = remainingSpans;
+
+ const inferSetting = client.getOptions().sendDefaultPii ? 'auto' : 'never';
+
+ return [
+ { type: 'span', item_count: genAiSpans.length, content_type: 'application/vnd.sentry.items.span.v2+json' },
+ {
+ version: 2,
+ ...(isBrowser() && {
+ ingest_settings: { infer_ip: inferSetting, infer_user_agent: inferSetting },
+ }),
+ items: genAiSpans,
+ },
+ ];
+}
diff --git a/packages/core/src/tracing/spans/spanJsonToStreamedSpan.ts b/packages/core/src/tracing/spans/spanJsonToStreamedSpan.ts
new file mode 100644
index 000000000000..4dfd6c5202b9
--- /dev/null
+++ b/packages/core/src/tracing/spans/spanJsonToStreamedSpan.ts
@@ -0,0 +1,23 @@
+import type { RawAttributes } from '../../attributes';
+import type { SerializedStreamedSpan, SpanJSON, StreamedSpanJSON } from '../../types-hoist/span';
+import { streamedSpanJsonToSerializedSpan } from '../../utils/spanUtils';
+
+/**
+ * Converts a v1 SpanJSON (from a legacy transaction) to a serialized v2 StreamedSpan.
+ */
+export function spanJsonToSerializedStreamedSpan(span: SpanJSON): SerializedStreamedSpan {
+ const streamedSpan: StreamedSpanJSON = {
+ trace_id: span.trace_id,
+ span_id: span.span_id,
+ parent_span_id: span.parent_span_id,
+ name: span.description || '',
+ start_timestamp: span.start_timestamp,
+ end_timestamp: span.timestamp || span.start_timestamp,
+ status: !span.status || span.status === 'ok' || span.status === 'cancelled' ? 'ok' : 'error',
+ is_segment: false,
+ attributes: { ...(span.data as RawAttributes>) },
+ links: span.links,
+ };
+
+ return streamedSpanJsonToSerializedSpan(streamedSpan);
+}
diff --git a/packages/core/src/tracing/vercel-ai/index.ts b/packages/core/src/tracing/vercel-ai/index.ts
index c6ff4c784dde..4abdb0af5eae 100644
--- a/packages/core/src/tracing/vercel-ai/index.ts
+++ b/packages/core/src/tracing/vercel-ai/index.ts
@@ -325,8 +325,13 @@ export function processVercelAiSpanAttributes(attributes: Record;
export type SessionEnvelope = BaseEnvelope;
export type ClientReportEnvelope = BaseEnvelope;
diff --git a/packages/core/src/types-hoist/options.ts b/packages/core/src/types-hoist/options.ts
index a1fc1e074a75..db65585d9e43 100644
--- a/packages/core/src/types-hoist/options.ts
+++ b/packages/core/src/types-hoist/options.ts
@@ -553,6 +553,15 @@ export interface ClientOptions boolean)> = [],
+ patterns:
+ | Array boolean)>
+ | Set boolean)> = [],
requireExactStringMatch: boolean = false,
): boolean {
- return patterns.some(pattern => isMatchingPattern(testString, pattern, requireExactStringMatch));
+ for (const pattern of patterns) {
+ if (isMatchingPattern(testString, pattern, requireExactStringMatch)) {
+ return true;
+ }
+ }
+ return false;
}
diff --git a/packages/core/test/lib/instrument/console.test.ts b/packages/core/test/lib/instrument/console.test.ts
index 2499a231712d..f67f7da1a1a6 100644
--- a/packages/core/test/lib/instrument/console.test.ts
+++ b/packages/core/test/lib/instrument/console.test.ts
@@ -1,22 +1,110 @@
-import { describe, expect, it, vi } from 'vitest';
-import { addConsoleInstrumentationHandler } from '../../../src/instrument/console';
+import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
+import {
+ _INTERNAL_resetConsoleInstrumentationOptions,
+ addConsoleInstrumentationFilter,
+ addConsoleInstrumentationHandler,
+} from '../../../src/instrument/console';
import { GLOBAL_OBJ } from '../../../src/utils/worldwide';
+import { debug, originalConsoleMethods } from '../../../src/utils/debug-logger';
+import { resetInstrumentationHandlers } from '../../../src/instrument/handlers';
describe('addConsoleInstrumentationHandler', () => {
+ let _originalConsoleMethods: typeof originalConsoleMethods = {};
+
+ afterEach(() => {
+ Object.assign(originalConsoleMethods, _originalConsoleMethods);
+ resetInstrumentationHandlers();
+ vi.restoreAllMocks();
+ });
+
+ // This cannot be done in beforeEach, as the first invocation of `addConsoleInstrumentationHandler` will overwrite the original console methods.
+ // Due to `fill` being called
+ // So instead, we need to call this each time after calling `addConsoleInstrumentationHandler`
+ function mockConsoleMethods() {
+ // Re-store this with the current implementation
+ Object.assign(_originalConsoleMethods, originalConsoleMethods);
+
+ // Overwrite with mock console methods
+ Object.assign(originalConsoleMethods, {
+ log: vi.fn(),
+ warn: vi.fn(),
+ error: vi.fn(),
+ debug: vi.fn(),
+ info: vi.fn(),
+ });
+ }
+
it.each(['log', 'warn', 'error', 'debug', 'info'] as const)(
'calls registered handler when console.%s is called',
level => {
const handler = vi.fn();
addConsoleInstrumentationHandler(handler);
+ mockConsoleMethods();
GLOBAL_OBJ.console[level]('test message');
expect(handler).toHaveBeenCalledWith(expect.objectContaining({ args: ['test message'], level }));
+ expect(originalConsoleMethods[level]).toHaveBeenCalledWith('test message');
},
);
it('calls through to the underlying console method without throwing', () => {
addConsoleInstrumentationHandler(vi.fn());
+ mockConsoleMethods();
expect(() => GLOBAL_OBJ.console.log('hello')).not.toThrow();
});
+
+ describe('filter', () => {
+ afterEach(() => {
+ _INTERNAL_resetConsoleInstrumentationOptions();
+ });
+
+ describe('when debug is disabled', () => {
+ beforeEach(() => {
+ vi.spyOn(debug, 'isEnabled').mockImplementation(() => false);
+ });
+
+ it('filters out messages that match the filter', () => {
+ const handler = vi.fn();
+ addConsoleInstrumentationHandler(handler);
+ addConsoleInstrumentationFilter(['test message']);
+ mockConsoleMethods();
+
+ GLOBAL_OBJ.console.log('test message');
+
+ expect(originalConsoleMethods.log).not.toHaveBeenCalledWith('test message');
+ expect(handler).not.toHaveBeenCalled();
+ });
+
+ it('does not filter out messages that do not match the filter', () => {
+ const handler = vi.fn();
+ addConsoleInstrumentationHandler(handler);
+ addConsoleInstrumentationFilter(['test message']);
+ mockConsoleMethods();
+
+ GLOBAL_OBJ.console.log('other message');
+
+ expect(handler).toHaveBeenCalled();
+ expect(originalConsoleMethods.log).toHaveBeenCalledWith('other message');
+ });
+ });
+
+ describe('when debug is enabled', () => {
+ beforeEach(() => {
+ vi.spyOn(debug, 'isEnabled').mockImplementation(() => true);
+ });
+
+ it('logs filtered messages but does not call the handler for them', () => {
+ const handler = vi.fn();
+ addConsoleInstrumentationHandler(handler);
+ addConsoleInstrumentationFilter(['test message']);
+ mockConsoleMethods();
+
+ GLOBAL_OBJ.console.log('test message');
+
+ expect(handler).not.toHaveBeenCalled();
+ expect(originalConsoleMethods.log).toHaveBeenCalledWith('test message');
+ });
+ });
+ });
});
diff --git a/packages/core/test/lib/integration.test.ts b/packages/core/test/lib/integration.test.ts
index 75e13374daa7..e5362d669716 100644
--- a/packages/core/test/lib/integration.test.ts
+++ b/packages/core/test/lib/integration.test.ts
@@ -73,7 +73,7 @@ describe('getIntegrationsToSetup', () => {
expect(integrations.map(i => i.name)).toEqual(expected);
});
- test('it uses passed integration over default intergation', () => {
+ test('it uses passed integration over default integration', () => {
const integrationDefault = new MockIntegration('ChaseSquirrels');
const integration1 = new MockIntegration('ChaseSquirrels');
diff --git a/packages/core/test/lib/integrations/mcp-server/mcpServerWrapper.test.ts b/packages/core/test/lib/integrations/mcp-server/mcpServerWrapper.test.ts
index 3fc48a2e0b47..845d7d4786da 100644
--- a/packages/core/test/lib/integrations/mcp-server/mcpServerWrapper.test.ts
+++ b/packages/core/test/lib/integrations/mcp-server/mcpServerWrapper.test.ts
@@ -2,7 +2,11 @@ import { beforeEach, describe, expect, it, vi } from 'vitest';
import * as currentScopes from '../../../../src/currentScopes';
import { wrapMcpServerWithSentry } from '../../../../src/integrations/mcp-server';
import * as tracingModule from '../../../../src/tracing';
-import { createMockMcpServer, createMockMcpServerWithRegisterApi } from './testUtils';
+import {
+ createMockMcpServer,
+ createMockMcpServerWithPreregisteredHandlers,
+ createMockMcpServerWithRegisterApi,
+} from './testUtils';
describe('wrapMcpServerWithSentry', () => {
const startSpanSpy = vi.spyOn(tracingModule, 'startSpan');
@@ -145,6 +149,41 @@ describe('wrapMcpServerWithSentry', () => {
});
});
+ describe('Retroactive handler wrapping (handlers registered before wrapMcpServerWithSentry)', () => {
+ it('should replace executor/readCallback/handler on pre-registered entries with wrapped versions', () => {
+ const server = createMockMcpServerWithPreregisteredHandlers();
+ const { toolExecutor, resourceReadCallback, resourceTemplateReadCallback, promptHandler } = server._originals;
+
+ wrapMcpServerWithSentry(server);
+
+ expect(server._registeredTools['my-tool']!.executor).not.toBe(toolExecutor);
+ expect(server._registeredResources['res://my-resource']!.readCallback).not.toBe(resourceReadCallback);
+ expect(server._registeredResourceTemplates['my-template']!.readCallback).not.toBe(resourceTemplateReadCallback);
+ expect(server._registeredPrompts['my-prompt']!.handler).not.toBe(promptHandler);
+ });
+
+ it('should still wrap the registration methods for future handlers', () => {
+ const server = createMockMcpServerWithPreregisteredHandlers();
+ const originalRegisterTool = server.registerTool;
+
+ wrapMcpServerWithSentry(server);
+
+ expect(server.registerTool).not.toBe(originalRegisterTool);
+ });
+
+ it('should not double-wrap if called twice on the same instance with pre-registered handlers', () => {
+ const server = createMockMcpServerWithPreregisteredHandlers();
+
+ wrapMcpServerWithSentry(server);
+ const executorAfterFirstWrap = server._registeredTools['my-tool']!.executor;
+
+ wrapMcpServerWithSentry(server);
+ const executorAfterSecondWrap = server._registeredTools['my-tool']!.executor;
+
+ expect(executorAfterFirstWrap).toBe(executorAfterSecondWrap);
+ });
+ });
+
describe('Handler Wrapping (register* API)', () => {
let mockServer: ReturnType;
let wrappedServer: ReturnType;
diff --git a/packages/core/test/lib/integrations/mcp-server/testUtils.ts b/packages/core/test/lib/integrations/mcp-server/testUtils.ts
index 23b9ee6ff51b..ebe940365d90 100644
--- a/packages/core/test/lib/integrations/mcp-server/testUtils.ts
+++ b/packages/core/test/lib/integrations/mcp-server/testUtils.ts
@@ -15,6 +15,41 @@ export function createMockMcpServer() {
};
}
+/**
+ * Create a mock MCP server that simulates already having tools/resources/prompts registered
+ * (i.e. wrapMcpServerWithSentry is called after registration). Mirrors the internal shape
+ * used by McpServer v2: tools have an `executor`, resources/prompts have `readCallback`/`handler`.
+ */
+export function createMockMcpServerWithPreregisteredHandlers() {
+ const toolExecutor = vi.fn().mockResolvedValue({ content: [] });
+ const resourceReadCallback = vi.fn().mockResolvedValue({ contents: [] });
+ const resourceTemplateReadCallback = vi.fn().mockResolvedValue({ contents: [] });
+ const promptHandler = vi.fn().mockResolvedValue({ messages: [] });
+
+ return {
+ registerTool: vi.fn(),
+ registerResource: vi.fn(),
+ registerPrompt: vi.fn(),
+ connect: vi.fn().mockResolvedValue(undefined),
+ server: { setRequestHandler: vi.fn() },
+ // Simulated internal registries (mirrors McpServer v2 private fields)
+ _registeredTools: {
+ 'my-tool': { executor: toolExecutor },
+ },
+ _registeredResources: {
+ 'res://my-resource': { readCallback: resourceReadCallback },
+ },
+ _registeredResourceTemplates: {
+ 'my-template': { readCallback: resourceTemplateReadCallback },
+ },
+ _registeredPrompts: {
+ 'my-prompt': { handler: promptHandler },
+ },
+ // Expose the original fns so tests can assert wrapping happened
+ _originals: { toolExecutor, resourceReadCallback, resourceTemplateReadCallback, promptHandler },
+ };
+}
+
/**
* Create a mock MCP server instance using the new register* API (SDK >=1.x / 2.x)
*/
diff --git a/packages/core/test/lib/tracing/sentrySpan.test.ts b/packages/core/test/lib/tracing/sentrySpan.test.ts
index 4b70e1c3ef97..57ac2cdf5ba3 100644
--- a/packages/core/test/lib/tracing/sentrySpan.test.ts
+++ b/packages/core/test/lib/tracing/sentrySpan.test.ts
@@ -234,6 +234,7 @@ describe('SentrySpan', () => {
trace_id: expect.stringMatching(/^[a-f0-9]{32}$/),
transaction: 'test',
},
+ hasGenAiSpans: false,
},
spans: [],
start_timestamp: 1,
diff --git a/packages/core/test/lib/tracing/spans/extractGenAiSpans.test.ts b/packages/core/test/lib/tracing/spans/extractGenAiSpans.test.ts
new file mode 100644
index 000000000000..e3286fb76a97
--- /dev/null
+++ b/packages/core/test/lib/tracing/spans/extractGenAiSpans.test.ts
@@ -0,0 +1,158 @@
+import { describe, expect, it } from 'vitest';
+import type { Event } from '../../../../src/types-hoist/event';
+import type { SpanJSON } from '../../../../src/types-hoist/span';
+import { extractGenAiSpansFromEvent } from '../../../../src/tracing/spans/extractGenAiSpans';
+import { getDefaultTestClientOptions, TestClient } from '../../../mocks/client';
+
+function makeSpanJSON(overrides: Partial = {}): SpanJSON {
+ return {
+ span_id: 'abc123def456789a',
+ trace_id: '00112233445566778899aabbccddeeff',
+ start_timestamp: 1000,
+ data: {},
+ ...overrides,
+ };
+}
+
+function makeTransactionEvent(spans: SpanJSON[]): Event {
+ return {
+ type: 'transaction',
+ transaction: 'GET /api/chat',
+ release: '1.0.0',
+ environment: 'production',
+ contexts: {
+ trace: {
+ span_id: 'root0000deadbeef',
+ trace_id: '00112233445566778899aabbccddeeff',
+ },
+ },
+ sdkProcessingMetadata: {
+ hasGenAiSpans: true,
+ },
+ spans,
+ };
+}
+
+function makeClient(options: Partial[0]> = {}): TestClient {
+ return new TestClient(
+ getDefaultTestClientOptions({
+ dsn: 'https://dsn@ingest.f00.f00/1',
+ streamGenAiSpans: true,
+ ...options,
+ }),
+ );
+}
+
+describe('extractGenAiSpansFromEvent', () => {
+ it('extracts gen_ai spans and removes them from the event', () => {
+ const genAiSpan = makeSpanJSON({
+ span_id: 'genai001',
+ op: 'gen_ai.chat',
+ description: 'chat gpt-4',
+ timestamp: 1005,
+ });
+ const httpSpan = makeSpanJSON({
+ span_id: 'http001',
+ op: 'http.client',
+ description: 'GET /api',
+ timestamp: 1002,
+ });
+
+ const event = makeTransactionEvent([genAiSpan, httpSpan], true);
+ const result = extractGenAiSpansFromEvent(event, makeClient());
+
+ expect(result).toBeDefined();
+ const [headers, payload] = result!;
+ expect(headers.type).toBe('span');
+ expect(headers.item_count).toBe(1);
+ expect(headers.content_type).toBe('application/vnd.sentry.items.span.v2+json');
+ expect(payload.version).toBe(2);
+ expect(payload.items).toHaveLength(1);
+ expect(payload.items[0]!.span_id).toBe('genai001');
+ expect(payload.items[0]!.name).toBe('chat gpt-4');
+
+ expect(event.spans).toHaveLength(1);
+ expect(event.spans![0]!.span_id).toBe('http001');
+ });
+
+ it('extracts multiple gen_ai spans', () => {
+ const chatSpan = makeSpanJSON({ span_id: 'chat001', op: 'gen_ai.chat', description: 'chat' });
+ const embeddingsSpan = makeSpanJSON({ span_id: 'embed001', op: 'gen_ai.embeddings', description: 'embed' });
+ const agentSpan = makeSpanJSON({ span_id: 'agent001', op: 'gen_ai.invoke_agent', description: 'agent' });
+ const dbSpan = makeSpanJSON({ span_id: 'db001', op: 'db.query', description: 'SELECT *' });
+
+ const event = makeTransactionEvent([chatSpan, embeddingsSpan, dbSpan, agentSpan], true);
+ const result = extractGenAiSpansFromEvent(event, makeClient());
+
+ expect(result).toBeDefined();
+ expect(result![0].item_count).toBe(3);
+ expect(result![1].items).toHaveLength(3);
+ expect(result![1].items.map(s => s.span_id)).toEqual(['chat001', 'embed001', 'agent001']);
+
+ expect(event.spans).toHaveLength(1);
+ expect(event.spans![0]!.span_id).toBe('db001');
+ });
+
+ it('returns undefined when hasGenAiSpans flag is not set', () => {
+ const event: Event = {
+ type: 'transaction',
+ spans: [makeSpanJSON({ op: 'gen_ai.chat' })],
+ sdkProcessingMetadata: {},
+ };
+
+ expect(extractGenAiSpansFromEvent(event, makeClient())).toBeUndefined();
+ expect(event.spans).toHaveLength(1);
+ });
+
+ it('returns undefined when there are no gen_ai spans', () => {
+ const event = makeTransactionEvent([makeSpanJSON({ op: 'http.client' }), makeSpanJSON({ op: 'db.query' })]);
+
+ expect(extractGenAiSpansFromEvent(event, makeClient())).toBeUndefined();
+ expect(event.spans).toHaveLength(2);
+ });
+
+ it('returns undefined when event has no spans', () => {
+ const event = makeTransactionEvent([]);
+ expect(extractGenAiSpansFromEvent(event, makeClient())).toBeUndefined();
+ });
+
+ it('returns undefined when event is not a transaction', () => {
+ const event: Event = { type: undefined, spans: [makeSpanJSON({ op: 'gen_ai.chat' })] };
+ expect(extractGenAiSpansFromEvent(event, makeClient())).toBeUndefined();
+ });
+
+ it('returns undefined when streamGenAiSpans is not enabled', () => {
+ const event = makeTransactionEvent([makeSpanJSON({ op: 'gen_ai.chat' })]);
+ const client = makeClient({ streamGenAiSpans: false });
+
+ expect(extractGenAiSpansFromEvent(event, client)).toBeUndefined();
+ expect(event.spans).toHaveLength(1);
+ });
+
+ it('returns undefined when span streaming is enabled', () => {
+ const event = makeTransactionEvent([makeSpanJSON({ op: 'gen_ai.chat' })]);
+ const client = makeClient({ traceLifecycle: 'stream' });
+
+ expect(extractGenAiSpansFromEvent(event, client)).toBeUndefined();
+ expect(event.spans).toHaveLength(1);
+ });
+
+ it('preserves parent_span_id pointing to v1 spans', () => {
+ const genAiSpan = makeSpanJSON({
+ span_id: 'genai001',
+ parent_span_id: 'http001',
+ op: 'gen_ai.chat',
+ });
+ const httpSpan = makeSpanJSON({
+ span_id: 'http001',
+ op: 'http.client',
+ });
+
+ const event = makeTransactionEvent([httpSpan, genAiSpan]);
+ const result = extractGenAiSpansFromEvent(event, makeClient());
+
+ expect(result![1].items[0]!.parent_span_id).toBe('http001');
+ expect(event.spans).toHaveLength(1);
+ expect(event.spans![0]!.span_id).toBe('http001');
+ });
+});
diff --git a/packages/core/test/lib/tracing/spans/spanJsonToStreamedSpan.test.ts b/packages/core/test/lib/tracing/spans/spanJsonToStreamedSpan.test.ts
new file mode 100644
index 000000000000..a42ce3468e65
--- /dev/null
+++ b/packages/core/test/lib/tracing/spans/spanJsonToStreamedSpan.test.ts
@@ -0,0 +1,93 @@
+import { describe, expect, it } from 'vitest';
+import type { SpanJSON } from '../../../../src/types-hoist/span';
+import { spanJsonToSerializedStreamedSpan } from '../../../../src/tracing/spans/spanJsonToStreamedSpan';
+
+function makeSpanJSON(overrides: Partial = {}): SpanJSON {
+ return {
+ span_id: 'abc123def456789a',
+ trace_id: '00112233445566778899aabbccddeeff',
+ start_timestamp: 1000,
+ data: {},
+ ...overrides,
+ };
+}
+
+describe('spanJsonToSerializedStreamedSpan', () => {
+ it('maps basic SpanJSON fields to StreamedSpan fields', () => {
+ const span = makeSpanJSON({
+ description: 'chat gpt-4',
+ timestamp: 1005,
+ status: 'ok',
+ op: 'gen_ai.chat',
+ origin: 'auto.ai.openai',
+ parent_span_id: 'parent00deadbeef',
+ });
+
+ const result = spanJsonToSerializedStreamedSpan(span);
+
+ expect(result.name).toBe('chat gpt-4');
+ expect(result.start_timestamp).toBe(1000);
+ expect(result.end_timestamp).toBe(1005);
+ expect(result.status).toBe('ok');
+ expect(result.is_segment).toBe(false);
+ expect(result.span_id).toBe('abc123def456789a');
+ expect(result.trace_id).toBe('00112233445566778899aabbccddeeff');
+ expect(result.parent_span_id).toBe('parent00deadbeef');
+ });
+
+ it('uses empty string for name when description is undefined', () => {
+ const result = spanJsonToSerializedStreamedSpan(makeSpanJSON({ description: undefined }));
+ expect(result.name).toBe('');
+ });
+
+ it('uses start_timestamp as end_timestamp when timestamp is undefined', () => {
+ const result = spanJsonToSerializedStreamedSpan(makeSpanJSON({ timestamp: undefined }));
+ expect(result.end_timestamp).toBe(1000);
+ });
+
+ it('maps v1 status strings to v2 ok/error', () => {
+ const cases: Array<[string | undefined, 'ok' | 'error']> = [
+ [undefined, 'ok'],
+ ['ok', 'ok'],
+ ['cancelled', 'ok'],
+ ['internal_error', 'error'],
+ ['not_found', 'error'],
+ ['unknown_error', 'error'],
+ ];
+
+ for (const [v1Status, expected] of cases) {
+ const result = spanJsonToSerializedStreamedSpan(makeSpanJSON({ status: v1Status }));
+ expect(result.status).toBe(expected);
+ }
+ });
+
+ it('preserves existing span data attributes', () => {
+ const span = makeSpanJSON({
+ data: {
+ 'gen_ai.system': 'openai',
+ 'gen_ai.request.model': 'gpt-4',
+ 'gen_ai.usage.input_tokens': 100,
+ 'gen_ai.usage.output_tokens': 50,
+ },
+ });
+
+ const result = spanJsonToSerializedStreamedSpan(span);
+
+ expect(result.attributes?.['gen_ai.system']).toEqual({ type: 'string', value: 'openai' });
+ expect(result.attributes?.['gen_ai.request.model']).toEqual({ type: 'string', value: 'gpt-4' });
+ expect(result.attributes?.['gen_ai.usage.input_tokens']).toEqual({ type: 'integer', value: 100 });
+ expect(result.attributes?.['gen_ai.usage.output_tokens']).toEqual({ type: 'integer', value: 50 });
+ });
+
+ it('carries over links', () => {
+ const span = makeSpanJSON({
+ links: [{ trace_id: 'aabb', span_id: 'ccdd', sampled: true, attributes: { foo: 'bar' } }],
+ });
+
+ const result = spanJsonToSerializedStreamedSpan(span);
+
+ expect(result.links).toEqual([
+ { trace_id: 'aabb', span_id: 'ccdd', sampled: true, attributes: { foo: { type: 'string', value: 'bar' } } },
+ ]);
+ });
+});
diff --git a/packages/core/tsconfig.test.json b/packages/core/tsconfig.test.json
index 5a80d11f7055..40725a892888 100644
--- a/packages/core/tsconfig.test.json
+++ b/packages/core/tsconfig.test.json
@@ -5,7 +5,6 @@
"compilerOptions": {
"lib": ["DOM", "es2020"],
- "module": "ESNext", // support dynamic import()
// should include all types from `./tsconfig.json` plus types for all test frameworks used
"types": ["node"]
diff --git a/packages/core/tsconfig.types.json b/packages/core/tsconfig.types.json
index 65455f66bd75..de9b6935694a 100644
--- a/packages/core/tsconfig.types.json
+++ b/packages/core/tsconfig.types.json
@@ -2,6 +2,8 @@
"extends": "./tsconfig.json",
"compilerOptions": {
+ "module": "esnext",
+ "moduleResolution": "bundler",
"declaration": true,
"declarationMap": true,
"emitDeclarationOnly": true,
diff --git a/packages/effect/rollup.npm.config.mjs b/packages/effect/rollup.npm.config.mjs
index 211157646473..dde3327a2501 100644
--- a/packages/effect/rollup.npm.config.mjs
+++ b/packages/effect/rollup.npm.config.mjs
@@ -9,9 +9,11 @@ const baseConfig = makeBaseNPMConfig({
},
});
-const defaultExternal = baseConfig.external || [];
+const defaultExternal = baseConfig.external;
+const isDefaultExternal =
+ typeof defaultExternal === 'function' ? defaultExternal : id => (defaultExternal || []).includes(id);
baseConfig.external = id => {
- if (defaultExternal.includes(id)) {
+ if (isDefaultExternal(id)) {
return true;
}
diff --git a/packages/ember/tsconfig.json b/packages/ember/tsconfig.json
index e472924f4d0f..04eae83278b0 100644
--- a/packages/ember/tsconfig.json
+++ b/packages/ember/tsconfig.json
@@ -4,7 +4,7 @@
"target": "es2022",
"lib": ["DOM", "ES2022"],
"allowJs": true,
- "moduleResolution": "node",
+ "moduleResolution": "bundler",
"allowSyntheticDefaultImports": true,
"alwaysStrict": true,
"strictNullChecks": true,
diff --git a/packages/gatsby/package.json b/packages/gatsby/package.json
index d457f506e4dc..bf75a7de04fb 100644
--- a/packages/gatsby/package.json
+++ b/packages/gatsby/package.json
@@ -47,7 +47,7 @@
"dependencies": {
"@sentry/core": "10.52.0",
"@sentry/react": "10.52.0",
- "@sentry/webpack-plugin": "^5.2.0"
+ "@sentry/webpack-plugin": "^5.3.0"
},
"peerDependencies": {
"gatsby": "^3.0.0 || ^4.0.0 || ^5.0.0",
diff --git a/packages/hono/rollup.npm.config.mjs b/packages/hono/rollup.npm.config.mjs
index 2a03d7540bdc..83a43b881f1f 100644
--- a/packages/hono/rollup.npm.config.mjs
+++ b/packages/hono/rollup.npm.config.mjs
@@ -11,7 +11,7 @@ const baseConfig = makeBaseNPMConfig({
const defaultExternal = baseConfig.external;
baseConfig.external = id => {
- if (defaultExternal.includes(id)) {
+ if (defaultExternal(id)) {
return true;
}
// Mark all hono subpaths as external
diff --git a/packages/hono/src/shared/middlewareHandlers.ts b/packages/hono/src/shared/middlewareHandlers.ts
index 41902d90f84f..03bb6e16da58 100644
--- a/packages/hono/src/shared/middlewareHandlers.ts
+++ b/packages/hono/src/shared/middlewareHandlers.ts
@@ -6,6 +6,7 @@ import {
getRootSpan,
SEMANTIC_ATTRIBUTE_SENTRY_SOURCE,
updateSpanName,
+ type Scope,
winterCGRequestToRequestData,
} from '@sentry/core';
import type { Context } from 'hono';
@@ -22,6 +23,8 @@ export function requestHandler(context: Context): void {
const isolationScope = defaultScope === currentIsolationScope ? defaultScope : currentIsolationScope;
+ updateSpanRouteName(isolationScope, context);
+
isolationScope.setSDKProcessingMetadata({
normalizedRequest: winterCGRequestToRequestData(hasFetchEvent(context) ? context.event.request : context.req.raw),
});
@@ -31,21 +34,25 @@ export function requestHandler(context: Context): void {
* Response handler for Hono framework
*/
export function responseHandler(context: Context): void {
+ if (context.error && !isExpectedError(context.error)) {
+ getClient()?.captureException(context.error, {
+ mechanism: { handled: false, type: 'auto.http.hono.context_error' },
+ });
+ }
+}
+
+function updateSpanRouteName(isolationScope: Scope, context: Context): void {
const activeSpan = getActiveSpan();
+ const lastMatchedRoute = routePath(context, -1);
+
if (activeSpan) {
- activeSpan.updateName(`${context.req.method} ${routePath(context)}`);
+ activeSpan.updateName(`${context.req.method} ${lastMatchedRoute}`);
activeSpan.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_SOURCE, 'route');
const rootSpan = getRootSpan(activeSpan);
- updateSpanName(rootSpan, `${context.req.method} ${routePath(context)}`);
+ updateSpanName(rootSpan, `${context.req.method} ${lastMatchedRoute}`);
rootSpan.setAttribute(SEMANTIC_ATTRIBUTE_SENTRY_SOURCE, 'route');
}
- getIsolationScope().setTransactionName(`${context.req.method} ${routePath(context)}`);
-
- if (context.error && !isExpectedError(context.error)) {
- getClient()?.captureException(context.error, {
- mechanism: { handled: false, type: 'auto.http.hono.context_error' },
- });
- }
+ isolationScope.setTransactionName(`${context.req.method} ${lastMatchedRoute}`);
}
diff --git a/packages/hono/test/shared/middlewareHandlers.test.ts b/packages/hono/test/shared/middlewareHandlers.test.ts
index b8e4cdef1062..accf5fe5f91a 100644
--- a/packages/hono/test/shared/middlewareHandlers.test.ts
+++ b/packages/hono/test/shared/middlewareHandlers.test.ts
@@ -1,6 +1,6 @@
import * as SentryCore from '@sentry/core';
import { beforeEach, describe, expect, it, vi } from 'vitest';
-import { responseHandler } from '../../src/shared/middlewareHandlers';
+import { requestHandler, responseHandler } from '../../src/shared/middlewareHandlers';
vi.mock('hono/route', () => ({
routePath: () => '/test',
@@ -11,6 +11,7 @@ vi.mock('../../src/utils/hono-context', () => ({
}));
const mockSetTransactionName = vi.fn();
+const mockSetSDKProcessingMetadata = vi.fn();
vi.mock('@sentry/core', async () => {
const actual = await vi.importActual('@sentry/core');
@@ -19,6 +20,7 @@ vi.mock('@sentry/core', async () => {
getActiveSpan: vi.fn(() => null),
getIsolationScope: vi.fn(() => ({
setTransactionName: mockSetTransactionName,
+ setSDKProcessingMetadata: mockSetSDKProcessingMetadata,
})),
getClient: vi.fn(() => undefined),
};
@@ -110,7 +112,7 @@ describe('responseHandler', () => {
describe('transaction name', () => {
it('sets transaction name on isolation scope', () => {
// oxlint-disable-next-line typescript/no-explicit-any
- responseHandler(createMockContext(200) as any);
+ requestHandler(createMockContext(200) as any);
expect(mockSetTransactionName).toHaveBeenCalledWith('GET /test');
});
diff --git a/packages/integration-shims/src/BrowserTracing.ts b/packages/integration-shims/src/BrowserTracing.ts
index f92f3a63d6d8..aaafbd537ec8 100644
--- a/packages/integration-shims/src/BrowserTracing.ts
+++ b/packages/integration-shims/src/BrowserTracing.ts
@@ -1,4 +1,4 @@
-import { consoleSandbox, defineIntegration } from '@sentry/core';
+import { consoleSandbox, defineIntegration } from '@sentry/core/browser';
/**
* This is a shim for the BrowserTracing integration.
diff --git a/packages/integration-shims/src/ElementTiming.ts b/packages/integration-shims/src/ElementTiming.ts
index 8a521163f7e1..1eb43d9cf682 100644
--- a/packages/integration-shims/src/ElementTiming.ts
+++ b/packages/integration-shims/src/ElementTiming.ts
@@ -1,4 +1,4 @@
-import { consoleSandbox, defineIntegration } from '@sentry/core';
+import { consoleSandbox, defineIntegration } from '@sentry/core/browser';
/**
* This is a shim for the ElementTiming integration.
diff --git a/packages/integration-shims/src/Feedback.ts b/packages/integration-shims/src/Feedback.ts
index dd56bd273717..aa21f81494e7 100644
--- a/packages/integration-shims/src/Feedback.ts
+++ b/packages/integration-shims/src/Feedback.ts
@@ -1,5 +1,5 @@
-import type { Integration } from '@sentry/core';
-import { consoleSandbox } from '@sentry/core';
+import type { Integration } from '@sentry/core/browser';
+import { consoleSandbox } from '@sentry/core/browser';
import { FAKE_FUNCTION } from './common';
const FEEDBACK_INTEGRATION_METHODS = ['attachTo', 'createForm', 'createWidget', 'remove'] as const;
diff --git a/packages/integration-shims/src/Replay.ts b/packages/integration-shims/src/Replay.ts
index eee5cfbb2ef7..e532c49bf276 100644
--- a/packages/integration-shims/src/Replay.ts
+++ b/packages/integration-shims/src/Replay.ts
@@ -1,5 +1,5 @@
-import type { Integration } from '@sentry/core';
-import { consoleSandbox } from '@sentry/core';
+import type { Integration } from '@sentry/core/browser';
+import { consoleSandbox } from '@sentry/core/browser';
import { FAKE_FUNCTION } from './common';
const REPLAY_INTEGRATION_METHODS = ['start', 'stop', 'flush'] as const;
diff --git a/packages/integration-shims/src/SpanStreaming.ts b/packages/integration-shims/src/SpanStreaming.ts
index 7b445f086145..269770b9ab21 100644
--- a/packages/integration-shims/src/SpanStreaming.ts
+++ b/packages/integration-shims/src/SpanStreaming.ts
@@ -1,4 +1,4 @@
-import { consoleSandbox, defineIntegration } from '@sentry/core';
+import { consoleSandbox, defineIntegration } from '@sentry/core/browser';
/**
* This is a shim for the SpanStreaming integration.
diff --git a/packages/integration-shims/src/launchDarkly.ts b/packages/integration-shims/src/launchDarkly.ts
index 76750f5c863c..08203b6a74ad 100644
--- a/packages/integration-shims/src/launchDarkly.ts
+++ b/packages/integration-shims/src/launchDarkly.ts
@@ -1,4 +1,4 @@
-import { consoleSandbox, defineIntegration, isBrowser } from '@sentry/core';
+import { consoleSandbox, defineIntegration, isBrowser } from '@sentry/core/browser';
import { FAKE_FUNCTION } from './common';
/**
diff --git a/packages/integration-shims/src/logs.ts b/packages/integration-shims/src/logs.ts
index 33af020efefc..7544a80cd30d 100644
--- a/packages/integration-shims/src/logs.ts
+++ b/packages/integration-shims/src/logs.ts
@@ -1,5 +1,5 @@
-import type { Integration, ParameterizedString } from '@sentry/core';
-import { consoleSandbox, defineIntegration } from '@sentry/core';
+import type { Integration, ParameterizedString } from '@sentry/core/browser';
+import { consoleSandbox, defineIntegration } from '@sentry/core/browser';
import { FAKE_FUNCTION } from './common';
import { DEBUG_BUILD } from './debug-build';
diff --git a/packages/integration-shims/tsconfig.json b/packages/integration-shims/tsconfig.json
index bf45a09f2d71..d1a9c722679f 100644
--- a/packages/integration-shims/tsconfig.json
+++ b/packages/integration-shims/tsconfig.json
@@ -4,6 +4,7 @@
"include": ["src/**/*"],
"compilerOptions": {
+ "moduleResolution": "bundler"
// package-specific options
}
}
diff --git a/packages/nextjs/package.json b/packages/nextjs/package.json
index 50f914bd168f..a389bd02df29 100644
--- a/packages/nextjs/package.json
+++ b/packages/nextjs/package.json
@@ -80,14 +80,14 @@
"@opentelemetry/semantic-conventions": "^1.40.0",
"@rollup/plugin-commonjs": "28.0.1",
"@sentry-internal/browser-utils": "10.52.0",
- "@sentry/bundler-plugin-core": "^5.2.0",
+ "@sentry/bundler-plugin-core": "^5.3.0",
"@sentry/core": "10.52.0",
"@sentry/node": "10.52.0",
"@sentry/opentelemetry": "10.52.0",
"@sentry/react": "10.52.0",
"@sentry/vercel-edge": "10.52.0",
- "@sentry/webpack-plugin": "^5.2.0",
- "rollup": "^4.35.0",
+ "@sentry/webpack-plugin": "^5.3.0",
+ "rollup": "^4.60.3",
"stacktrace-parser": "^0.1.11"
},
"devDependencies": {
@@ -102,7 +102,7 @@
"scripts": {
"build": "run-p build:transpile build:types",
"build:dev": "yarn build",
- "build:transpile": "ts-node scripts/buildRollup.ts",
+ "build:transpile": "ts-node --project tsconfig.tsnode.json scripts/buildRollup.ts",
"build:types": "run-s build:types:core build:types:downlevel",
"build:types:core": "tsc -p tsconfig.types.json",
"build:types:downlevel": "yarn downlevel-dts build/types build/types-ts3.8 --to ts3.8",
diff --git a/packages/nextjs/src/config/getBuildPluginOptions.ts b/packages/nextjs/src/config/getBuildPluginOptions.ts
index 5018e1b4b196..e616c49263af 100644
--- a/packages/nextjs/src/config/getBuildPluginOptions.ts
+++ b/packages/nextjs/src/config/getBuildPluginOptions.ts
@@ -309,6 +309,7 @@ export function getBuildPluginOptions({
const skipSourcemapsUpload = shouldSkipSourcemapUpload(buildTool, useRunAfterProductionCompileHook);
return {
+ applicationKey: sentryBuildOptions.applicationKey,
authToken: sentryBuildOptions.authToken,
headers: sentryBuildOptions.headers,
org: sentryBuildOptions.org,
diff --git a/packages/nextjs/src/config/turbopack/constructTurbopackConfig.ts b/packages/nextjs/src/config/turbopack/constructTurbopackConfig.ts
index 31530f07c042..bf919e644754 100644
--- a/packages/nextjs/src/config/turbopack/constructTurbopackConfig.ts
+++ b/packages/nextjs/src/config/turbopack/constructTurbopackConfig.ts
@@ -66,7 +66,8 @@ export function constructTurbopackConfig({
// so it is safe even for node_modules with strict initialization order.
// We only exclude Next.js build polyfills which contain non-standard syntax that causes
// parse errors when any code is prepended (Turbopack re-parses the loader output).
- const applicationKey = userSentryOptions?._experimental?.turbopackApplicationKey;
+ // eslint-disable-next-line deprecation/deprecation
+ const applicationKey = userSentryOptions?.applicationKey ?? userSentryOptions?._experimental?.turbopackApplicationKey;
if (applicationKey && nextJsVersion && supportsTurbopackRuleCondition(nextJsVersion)) {
newConfig.rules = safelyAddTurbopackRule(newConfig.rules, {
matcher: '*.{ts,tsx,js,jsx,mjs,cjs}',
diff --git a/packages/nextjs/src/config/types.ts b/packages/nextjs/src/config/types.ts
index 86068841e773..722c3f58f570 100644
--- a/packages/nextjs/src/config/types.ts
+++ b/packages/nextjs/src/config/types.ts
@@ -463,6 +463,18 @@ export type SentryBuildOptions = {
};
};
+ /**
+ * A key that is used to identify the application in the Sentry bundler plugins.
+ * This key is used by the `thirdPartyErrorFilterIntegration` to filter out errors
+ * originating from third-party scripts.
+ *
+ * For webpack builds, this is forwarded to the `@sentry/webpack-plugin`.
+ * For Turbopack builds, this injects module metadata via a custom loader.
+ *
+ * @see https://docs.sentry.io/platforms/javascript/configuration/filtering/#using-thirdpartyerrorfilterintegration
+ */
+ applicationKey?: string;
+
/**
* Options to configure various bundle size optimizations related to the Sentry SDK.
*/
@@ -738,8 +750,10 @@ export type SentryBuildOptions = {
* webpack builds via its `moduleMetadata` / `applicationKey` option.
*
* Requires Next.js 16+
+ *
+ * @deprecated Use the top-level `applicationKey` option instead, which works for both webpack and Turbopack builds.
*/
- turbopackApplicationKey?: string;
+ turbopackApplicationKey?: string; // TODO(v11): remove this option
/**
* Options for React component name annotation in Turbopack builds.
* When enabled, JSX elements are annotated with `data-sentry-component`,
diff --git a/packages/nextjs/test/config/getBuildPluginOptions.test.ts b/packages/nextjs/test/config/getBuildPluginOptions.test.ts
index c67135a5d8d3..0b8a729e5942 100644
--- a/packages/nextjs/test/config/getBuildPluginOptions.test.ts
+++ b/packages/nextjs/test/config/getBuildPluginOptions.test.ts
@@ -61,6 +61,21 @@ describe('getBuildPluginOptions', () => {
});
});
+ it('forwards applicationKey to plugin options', () => {
+ const sentryBuildOptions: SentryBuildOptions = {
+ applicationKey: 'my-app-key',
+ };
+
+ const result = getBuildPluginOptions({
+ sentryBuildOptions,
+ releaseName: mockReleaseName,
+ distDirAbsPath: mockDistDirAbsPath,
+ buildTool: 'after-production-compile-webpack',
+ });
+
+ expect(result.applicationKey).toBe('my-app-key');
+ });
+
it('normalizes Windows paths to posix for glob patterns in after-production-compile builds', () => {
const windowsPath = 'C:\\Users\\test\\.next';
const sentryBuildOptions: SentryBuildOptions = {
diff --git a/packages/nextjs/test/config/turbopack/constructTurbopackConfig.test.ts b/packages/nextjs/test/config/turbopack/constructTurbopackConfig.test.ts
index 663317a8bd69..242174f015de 100644
--- a/packages/nextjs/test/config/turbopack/constructTurbopackConfig.test.ts
+++ b/packages/nextjs/test/config/turbopack/constructTurbopackConfig.test.ts
@@ -977,6 +977,39 @@ describe('moduleMetadataInjection with applicationKey', () => {
});
});
+ it('should add metadata loader rule when top-level applicationKey is set and Next.js >= 16', () => {
+ const userNextConfig: NextConfigObject = {};
+
+ const result = constructTurbopackConfig({
+ userNextConfig,
+ userSentryOptions: { applicationKey: 'my-top-level-key' },
+ nextJsVersion: '16.0.0',
+ });
+
+ const rule = result.rules!['*.{ts,tsx,js,jsx,mjs,cjs}'] as {
+ loaders: Array<{ loader: string; options: { applicationKey: string } }>;
+ };
+ expect(rule.loaders[0]!.options.applicationKey).toBe('my-top-level-key');
+ });
+
+ it('should prefer top-level applicationKey over deprecated _experimental.turbopackApplicationKey', () => {
+ const userNextConfig: NextConfigObject = {};
+
+ const result = constructTurbopackConfig({
+ userNextConfig,
+ userSentryOptions: {
+ applicationKey: 'top-level-key',
+ _experimental: { turbopackApplicationKey: 'deprecated-key' },
+ },
+ nextJsVersion: '16.0.0',
+ });
+
+ const rule = result.rules!['*.{ts,tsx,js,jsx,mjs,cjs}'] as {
+ loaders: Array<{ loader: string; options: { applicationKey: string } }>;
+ };
+ expect(rule.loaders[0]!.options.applicationKey).toBe('top-level-key');
+ });
+
it('should only exclude Next.js polyfills, not all foreign modules', () => {
const userNextConfig: NextConfigObject = {};
diff --git a/packages/nextjs/tsconfig.json b/packages/nextjs/tsconfig.json
index bf45a09f2d71..202590772b10 100644
--- a/packages/nextjs/tsconfig.json
+++ b/packages/nextjs/tsconfig.json
@@ -5,5 +5,7 @@
"compilerOptions": {
// package-specific options
+ "module": "esnext",
+ "moduleResolution": "bundler"
}
}
diff --git a/packages/nextjs/tsconfig.tsnode.json b/packages/nextjs/tsconfig.tsnode.json
new file mode 100644
index 000000000000..bf45a09f2d71
--- /dev/null
+++ b/packages/nextjs/tsconfig.tsnode.json
@@ -0,0 +1,9 @@
+{
+ "extends": "../../tsconfig.json",
+
+ "include": ["src/**/*"],
+
+ "compilerOptions": {
+ // package-specific options
+ }
+}
diff --git a/packages/nitro/package.json b/packages/nitro/package.json
index eccc8e5ded85..be7fbecb2f98 100644
--- a/packages/nitro/package.json
+++ b/packages/nitro/package.json
@@ -35,7 +35,7 @@
"nitro": ">=3.0.0-0 <4.0.0 || 3.0.260311-beta || 3.0.260415-beta"
},
"dependencies": {
- "@sentry/bundler-plugin-core": "^5.2.0",
+ "@sentry/bundler-plugin-core": "^5.3.0",
"@sentry/core": "10.52.0",
"@sentry/node": "10.52.0",
"@sentry/opentelemetry": "10.52.0"
diff --git a/packages/node-core/src/integrations/console.ts b/packages/node-core/src/integrations/console.ts
index d958e00bdf12..86591871999e 100644
--- a/packages/node-core/src/integrations/console.ts
+++ b/packages/node-core/src/integrations/console.ts
@@ -14,6 +14,11 @@ import {
interface ConsoleIntegrationOptions {
levels: ConsoleLevel[];
+ /**
+ * Filter out console messages that match the given strings or regular expressions.
+ * These will neither be passed to the handler, and they will also not be logged to the user, unless they have debug enabled.
+ */
+ filter?: (string | RegExp)[];
}
/**
@@ -34,12 +39,23 @@ export const consoleIntegration = defineIntegration((options: Partial ({
+ getSystemErrorMap: vi.fn() as ReturnType | undefined,
+}));
+
+vi.mock('node:util', async importOriginal => {
+ const actual = (await importOriginal()) as typeof nodeUtil;
+ mocks.getSystemErrorMap = vi.fn(actual.getSystemErrorMap);
+ return {
+ ...actual,
+ get getSystemErrorMap() {
+ return mocks.getSystemErrorMap;
+ },
+ };
+});
+
+import * as util from 'node:util';
+
+describe('systemErrorIntegration', () => {
+ afterEach(() => {
+ vi.mocked(util.getSystemErrorMap).mockRestore();
+ });
+
+ function createClient(sendDefaultPii = false): Client {
+ return {
+ getOptions: () => ({ sendDefaultPii }),
+ } as unknown as Client;
+ }
+
+ it('returns the event unchanged when util.getSystemErrorMap is undefined (e.g. Bun)', () => {
+ const originalFn = mocks.getSystemErrorMap;
+ mocks.getSystemErrorMap = undefined;
+
+ try {
+ const integration = systemErrorIntegration();
+ const error = Object.assign(new Error('boom'), { errno: -2, path: '/some/path' });
+ const event = { exception: { values: [{ value: error.message }] } } as Event;
+
+ const result = integration.processEvent!(event, { originalException: error }, createClient()) as Event;
+
+ expect(result).toBe(event);
+ expect(result.contexts?.node_system_error).toBeUndefined();
+ } finally {
+ mocks.getSystemErrorMap = originalFn;
+ }
+ });
+
+ it('adds node_system_error context for a real SystemError', () => {
+ const errno = -2;
+ vi.mocked(util.getSystemErrorMap).mockReturnValue(
+ new Map([[errno, ['ENOENT', 'no such file or directory']]]),
+ );
+
+ const integration = systemErrorIntegration();
+ const error = Object.assign(new Error("ENOENT: no such file or directory, open '/secret/path'"), {
+ errno,
+ path: '/secret/path',
+ });
+ const event = { exception: { values: [{ value: error.message }] } } as Event;
+
+ const result = integration.processEvent!(event, { originalException: error }, createClient()) as Event;
+
+ expect(result.contexts?.node_system_error).toEqual({ errno });
+ expect(result.exception?.values?.[0]?.value).not.toContain('/secret/path');
+ });
+
+ it('keeps path in context when sendDefaultPii is true', () => {
+ const errno = -2;
+ vi.mocked(util.getSystemErrorMap).mockReturnValue(
+ new Map([[errno, ['ENOENT', 'no such file or directory']]]),
+ );
+
+ const integration = systemErrorIntegration();
+ const error = Object.assign(new Error('boom'), { errno, path: '/secret/path' });
+ const event = { exception: { values: [{ value: error.message }] } } as Event;
+
+ const result = integration.processEvent!(event, { originalException: error }, createClient(true)) as Event;
+
+ expect(result.contexts?.node_system_error).toEqual({ errno, path: '/secret/path' });
+ });
+
+ it('keeps path in context when includePaths option is true', () => {
+ const errno = -2;
+ vi.mocked(util.getSystemErrorMap).mockReturnValue(
+ new Map([[errno, ['ENOENT', 'no such file or directory']]]),
+ );
+
+ const integration = systemErrorIntegration({ includePaths: true });
+ const error = Object.assign(new Error('boom'), { errno, path: '/secret/path' });
+ const event = { exception: { values: [{ value: error.message }] } } as Event;
+
+ const result = integration.processEvent!(event, { originalException: error }, createClient()) as Event;
+
+ expect(result.contexts?.node_system_error).toEqual({ errno, path: '/secret/path' });
+ });
+
+ it('returns the event unchanged when the error has no errno', () => {
+ vi.mocked(util.getSystemErrorMap).mockReturnValue(
+ new Map([[-2, ['ENOENT', 'no such file or directory']]]),
+ );
+
+ const integration = systemErrorIntegration();
+ const error = new Error('not a system error');
+ const event = {} as Event;
+
+ const result = integration.processEvent!(event, { originalException: error }, createClient()) as Event;
+
+ expect(result?.contexts?.node_system_error).toBeUndefined();
+ });
+
+ it('returns the event unchanged when originalException is not an Error', () => {
+ vi.mocked(util.getSystemErrorMap).mockReturnValue(
+ new Map([[-2, ['ENOENT', 'no such file or directory']]]),
+ );
+
+ const integration = systemErrorIntegration();
+ const event = {} as Event;
+
+ const result = integration.processEvent!(event, { originalException: 'not an error' }, createClient()) as Event;
+
+ expect(result.contexts?.node_system_error).toBeUndefined();
+ });
+
+ it('returns the event unchanged when errno is not in the system error map', () => {
+ vi.mocked(util.getSystemErrorMap).mockReturnValue(
+ new Map([[-2, ['ENOENT', 'no such file or directory']]]),
+ );
+
+ const integration = systemErrorIntegration();
+ const error = Object.assign(new Error('unknown'), { errno: 99999 });
+ const event = {} as Event;
+
+ const result = integration.processEvent!(event, { originalException: error }, createClient()) as Event;
+
+ expect(result.contexts?.node_system_error).toBeUndefined();
+ });
+});
diff --git a/packages/node-native/package.json b/packages/node-native/package.json
index 6946cc08f274..ce36a1a8e94b 100644
--- a/packages/node-native/package.json
+++ b/packages/node-native/package.json
@@ -62,7 +62,7 @@
"build:tarball": "npm pack"
},
"dependencies": {
- "@sentry-internal/node-native-stacktrace": "^0.4.0",
+ "@sentry-internal/node-native-stacktrace": "^0.5.0",
"@sentry/core": "10.52.0",
"@sentry/node": "10.52.0"
},
diff --git a/packages/nuxt/package.json b/packages/nuxt/package.json
index 9f3e89924965..14bbdc25c0ae 100644
--- a/packages/nuxt/package.json
+++ b/packages/nuxt/package.json
@@ -60,8 +60,8 @@
"@sentry/core": "10.52.0",
"@sentry/node": "10.52.0",
"@sentry/node-core": "10.52.0",
- "@sentry/rollup-plugin": "^5.2.0",
- "@sentry/vite-plugin": "^5.2.0",
+ "@sentry/rollup-plugin": "^5.3.0",
+ "@sentry/vite-plugin": "^5.3.0",
"@sentry/vue": "10.52.0",
"local-pkg": "^1.1.2"
},
diff --git a/packages/nuxt/src/vite/sourceMaps.ts b/packages/nuxt/src/vite/sourceMaps.ts
index 16d0fd330649..333a22e4e189 100644
--- a/packages/nuxt/src/vite/sourceMaps.ts
+++ b/packages/nuxt/src/vite/sourceMaps.ts
@@ -171,6 +171,7 @@ export function getPluginOptions(
}
return {
+ applicationKey: moduleOptions.applicationKey,
// eslint-disable-next-line deprecation/deprecation
org: moduleOptions.org ?? sourceMapsUploadOptions.org ?? process.env.SENTRY_ORG,
// eslint-disable-next-line deprecation/deprecation
diff --git a/packages/nuxt/test/vite/sourceMaps.test.ts b/packages/nuxt/test/vite/sourceMaps.test.ts
index 28e0336f43f5..ea80f32efd89 100644
--- a/packages/nuxt/test/vite/sourceMaps.test.ts
+++ b/packages/nuxt/test/vite/sourceMaps.test.ts
@@ -231,6 +231,18 @@ describe('getPluginOptions', () => {
});
});
+ it('passes applicationKey to plugin options', () => {
+ const options: SentryNuxtModuleOptions = {
+ applicationKey: 'my-app-key',
+ };
+
+ const result = getPluginOptions(options);
+
+ expect(result).toMatchObject({
+ applicationKey: 'my-app-key',
+ });
+ });
+
it('supports bundleSizeOptimizations', () => {
const options: SentryNuxtModuleOptions = {
bundleSizeOptimizations: {
diff --git a/packages/opentelemetry/src/spanExporter.ts b/packages/opentelemetry/src/spanExporter.ts
index f02df1d9d56c..aed57b52e58e 100644
--- a/packages/opentelemetry/src/spanExporter.ts
+++ b/packages/opentelemetry/src/spanExporter.ts
@@ -203,8 +203,11 @@ export class SentrySpanExporter {
// We'll recursively add all the child spans to this array
const spans = transactionEvent.spans || [];
+ let hasGenAiSpans = false;
for (const child of root.children) {
- createAndFinishSpanForOtelSpan(child, spans, sentSpans);
+ if (createAndFinishSpanForOtelSpan(child, spans, sentSpans)) {
+ hasGenAiSpans = true;
+ }
}
// spans.sort() mutates the array, but we do not use this anymore after this point
@@ -214,6 +217,13 @@ export class SentrySpanExporter {
? spans.sort((a, b) => a.start_timestamp - b.start_timestamp).slice(0, MAX_SPAN_COUNT)
: spans;
+ if (hasGenAiSpans) {
+ transactionEvent.sdkProcessingMetadata = {
+ ...transactionEvent.sdkProcessingMetadata,
+ hasGenAiSpans: true,
+ };
+ }
+
const measurements = timedEventsToMeasurements(span.events);
if (measurements) {
transactionEvent.measurements = measurements;
@@ -330,7 +340,10 @@ export function createTransactionForOtelSpan(span: ReadableSpan): TransactionEve
return transactionEvent;
}
-function createAndFinishSpanForOtelSpan(node: SpanNode, spans: SpanJSON[], sentSpans: Set): void {
+/**
+ * Returns `true` if this span or any descendant is a gen_ai span.
+ */
+function createAndFinishSpanForOtelSpan(node: SpanNode, spans: SpanJSON[], sentSpans: Set): boolean {
const span = node.span;
if (span) {
@@ -341,10 +354,13 @@ function createAndFinishSpanForOtelSpan(node: SpanNode, spans: SpanJSON[], sentS
// If this span should be dropped, we still want to create spans for the children of this
if (shouldDrop) {
+ let hasGenAiSpans = false;
node.children.forEach(child => {
- createAndFinishSpanForOtelSpan(child, spans, sentSpans);
+ if (createAndFinishSpanForOtelSpan(child, spans, sentSpans)) {
+ hasGenAiSpans = true;
+ }
});
- return;
+ return hasGenAiSpans;
}
const span_id = span.spanContext().spanId;
@@ -381,9 +397,13 @@ function createAndFinishSpanForOtelSpan(node: SpanNode, spans: SpanJSON[], sentS
spans.push(spanJSON);
+ let hasGenAiSpans = !!op?.startsWith('gen_ai.');
node.children.forEach(child => {
- createAndFinishSpanForOtelSpan(child, spans, sentSpans);
+ if (createAndFinishSpanForOtelSpan(child, spans, sentSpans)) {
+ hasGenAiSpans = true;
+ }
});
+ return hasGenAiSpans;
}
function getSpanData(span: ReadableSpan): {
diff --git a/packages/profiling-node/package.json b/packages/profiling-node/package.json
index b696718bc4dc..1abf60a20e2c 100644
--- a/packages/profiling-node/package.json
+++ b/packages/profiling-node/package.json
@@ -61,7 +61,7 @@
"test:watch": "vitest --watch"
},
"dependencies": {
- "@sentry-internal/node-cpu-profiler": "^2.2.0",
+ "@sentry-internal/node-cpu-profiler": "^2.4.0",
"@sentry/core": "10.52.0",
"@sentry/node": "10.52.0"
},
diff --git a/packages/profiling-node/scripts/prune-profiler-binaries.js b/packages/profiling-node/scripts/prune-profiler-binaries.js
index 11e8dc7f05f4..fc45fbcbe35a 100755
--- a/packages/profiling-node/scripts/prune-profiler-binaries.js
+++ b/packages/profiling-node/scripts/prune-profiler-binaries.js
@@ -68,6 +68,7 @@ const NODE_TO_ABI = {
20: '115',
22: '127',
24: '137',
+ 26: '147',
};
if (NODE) {
@@ -83,6 +84,8 @@ if (NODE) {
NODE = NODE_TO_ABI['22'];
} else if (NODE.startsWith('24')) {
NODE = NODE_TO_ABI['24'];
+ } else if (NODE.startsWith('26')) {
+ NODE = NODE_TO_ABI['26'];
} else {
ARGV_ERRORS.push(
`❌ Sentry: Invalid node version passed as argument, please make sure --target_node is a valid major node version. Supported versions are ${Object.keys(
diff --git a/packages/profiling-node/src/integration.ts b/packages/profiling-node/src/integration.ts
index 4cb51ac540b5..d5d3580a30f6 100644
--- a/packages/profiling-node/src/integration.ts
+++ b/packages/profiling-node/src/integration.ts
@@ -639,7 +639,7 @@ class ContinuousProfiler {
/** Exported only for tests. */
export const _nodeProfilingIntegration = ((): ProfilingIntegration => {
- if (![16, 18, 20, 22, 24].includes(NODE_MAJOR)) {
+ if (![16, 18, 20, 22, 24, 26].includes(NODE_MAJOR)) {
consoleSandbox(() => {
// eslint-disable-next-line no-console
console.warn(
diff --git a/packages/react-router/package.json b/packages/react-router/package.json
index 19b76c1c8c73..da668dfd3a37 100644
--- a/packages/react-router/package.json
+++ b/packages/react-router/package.json
@@ -54,7 +54,7 @@
"@sentry/core": "10.52.0",
"@sentry/node": "10.52.0",
"@sentry/react": "10.52.0",
- "@sentry/vite-plugin": "^5.2.0",
+ "@sentry/vite-plugin": "^5.3.0",
"glob": "^13.0.6"
},
"devDependencies": {
diff --git a/packages/react-router/src/vite/makeCustomSentryVitePlugins.ts b/packages/react-router/src/vite/makeCustomSentryVitePlugins.ts
index fcec109c6baa..69b07e1da28f 100644
--- a/packages/react-router/src/vite/makeCustomSentryVitePlugins.ts
+++ b/packages/react-router/src/vite/makeCustomSentryVitePlugins.ts
@@ -10,6 +10,7 @@ export async function makeCustomSentryVitePlugins(options: SentryReactRouterBuil
debug,
unstable_sentryVitePluginOptions,
bundleSizeOptimizations,
+ applicationKey,
authToken,
org,
project,
@@ -19,6 +20,7 @@ export async function makeCustomSentryVitePlugins(options: SentryReactRouterBuil
} = options;
const sentryVitePlugins = sentryVitePlugin({
+ applicationKey,
authToken: authToken ?? process.env.SENTRY_AUTH_TOKEN,
bundleSizeOptimizations,
debug: debug ?? false,
diff --git a/packages/react-router/test/vite/makeCustomSentryVitePlugins.test.ts b/packages/react-router/test/vite/makeCustomSentryVitePlugins.test.ts
index b98a6ebfb80d..c38e80ef72df 100644
--- a/packages/react-router/test/vite/makeCustomSentryVitePlugins.test.ts
+++ b/packages/react-router/test/vite/makeCustomSentryVitePlugins.test.ts
@@ -48,6 +48,18 @@ describe('makeCustomSentryVitePlugins', () => {
);
});
+ it('should pass applicationKey to sentryVitePlugin', async () => {
+ await makeCustomSentryVitePlugins({
+ applicationKey: 'my-app-key',
+ });
+
+ expect(sentryVitePlugin).toHaveBeenCalledWith(
+ expect.objectContaining({
+ applicationKey: 'my-app-key',
+ }),
+ );
+ });
+
it('should return all plugins from sentryVitePlugin', async () => {
const plugins = await makeCustomSentryVitePlugins({});
expect(plugins).toHaveLength(1);
diff --git a/packages/react/src/error.ts b/packages/react/src/error.ts
index c10cebe4ecf4..ce6d7a9cd2d5 100644
--- a/packages/react/src/error.ts
+++ b/packages/react/src/error.ts
@@ -1,5 +1,5 @@
import { captureException, withScope } from '@sentry/browser';
-import { isError } from '@sentry/core';
+import { isError } from '@sentry/core/browser';
import type { ErrorInfo } from 'react';
import { version } from 'react';
diff --git a/packages/react/src/reactrouter-compat-utils/route-manifest.ts b/packages/react/src/reactrouter-compat-utils/route-manifest.ts
index bdc49f76705e..3f8c83a27c94 100644
--- a/packages/react/src/reactrouter-compat-utils/route-manifest.ts
+++ b/packages/react/src/reactrouter-compat-utils/route-manifest.ts
@@ -1,4 +1,4 @@
-import { debug } from '@sentry/core';
+import { debug } from '@sentry/core/browser';
import { DEBUG_BUILD } from '../debug-build';
/**
diff --git a/packages/react/src/reactrouter-compat-utils/utils.ts b/packages/react/src/reactrouter-compat-utils/utils.ts
index 11a6d3609eaa..a43288823070 100644
--- a/packages/react/src/reactrouter-compat-utils/utils.ts
+++ b/packages/react/src/reactrouter-compat-utils/utils.ts
@@ -1,5 +1,5 @@
-import type { Span, TransactionSource } from '@sentry/core';
-import { debug, getActiveSpan, getRootSpan, spanToJSON } from '@sentry/core';
+import type { Span, TransactionSource } from '@sentry/core/browser';
+import { debug, getActiveSpan, getRootSpan, spanToJSON } from '@sentry/core/browser';
import { DEBUG_BUILD } from '../debug-build';
import type { Location, MatchRoutes, RouteMatch, RouteObject } from '../types';
import { matchRouteManifest, stripBasenameFromPathname } from './route-manifest';
diff --git a/packages/react/src/reactrouterv3.ts b/packages/react/src/reactrouterv3.ts
index f49948a2a74b..cdf97c02d03b 100644
--- a/packages/react/src/reactrouterv3.ts
+++ b/packages/react/src/reactrouterv3.ts
@@ -4,12 +4,12 @@ import {
startBrowserTracingPageLoadSpan,
WINDOW,
} from '@sentry/browser';
-import type { Integration, TransactionSource } from '@sentry/core';
+import type { Integration, TransactionSource } from '@sentry/core/browser';
import {
SEMANTIC_ATTRIBUTE_SENTRY_OP,
SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN,
SEMANTIC_ATTRIBUTE_SENTRY_SOURCE,
-} from '@sentry/core';
+} from '@sentry/core/browser';
import type { Location } from './types';
// Many of the types below had to be mocked out to prevent typescript issues
diff --git a/packages/react/src/redux.ts b/packages/react/src/redux.ts
index b04510a68cc9..00f556f52cd7 100644
--- a/packages/react/src/redux.ts
+++ b/packages/react/src/redux.ts
@@ -1,12 +1,12 @@
/* eslint-disable @typescript-eslint/no-explicit-any */
-import type { Scope } from '@sentry/core';
+import type { Scope } from '@sentry/core/browser';
import {
addBreadcrumb,
getClient,
getCurrentScope,
getGlobalScope,
setNormalizationDepthOverrideHint,
-} from '@sentry/core';
+} from '@sentry/core/browser';
interface Action<T = any> {
type: T;
diff --git a/packages/react/src/sdk.ts b/packages/react/src/sdk.ts
index 844bc30f1785..03981effc147 100644
--- a/packages/react/src/sdk.ts
+++ b/packages/react/src/sdk.ts
@@ -1,7 +1,7 @@
import type { BrowserOptions } from '@sentry/browser';
import { init as browserInit, setContext } from '@sentry/browser';
-import type { Client } from '@sentry/core';
-import { applySdkMetadata } from '@sentry/core';
+import type { Client } from '@sentry/core/browser';
+import { applySdkMetadata } from '@sentry/core/browser';
import { version } from 'react';
/**
diff --git a/packages/react/src/tanstackrouter.ts b/packages/react/src/tanstackrouter.ts
index d2424697d9d5..54b24b3d8490 100644
--- a/packages/react/src/tanstackrouter.ts
+++ b/packages/react/src/tanstackrouter.ts
@@ -4,12 +4,12 @@ import {
startBrowserTracingPageLoadSpan,
WINDOW,
} from '@sentry/browser';
-import type { Integration } from '@sentry/core';
+import type { Integration } from '@sentry/core/browser';
import {
SEMANTIC_ATTRIBUTE_SENTRY_OP,
SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN,
SEMANTIC_ATTRIBUTE_SENTRY_SOURCE,
-} from '@sentry/core';
+} from '@sentry/core/browser';
import type { VendoredTanstackRouter, VendoredTanstackRouterRouteMatch } from './vendor/tanstackrouter-types';
/**
diff --git a/packages/react/test/profiler.test.tsx b/packages/react/test/profiler.test.tsx
index 1e88479ffffe..5150975212ee 100644
--- a/packages/react/test/profiler.test.tsx
+++ b/packages/react/test/profiler.test.tsx
@@ -1,8 +1,8 @@
/**
* @vitest-environment jsdom
*/
-import type { StartSpanOptions } from '@sentry/core';
-import { SentrySpan } from '@sentry/core';
+import type { StartSpanOptions } from '@sentry/core/browser';
+import { SentrySpan } from '@sentry/core/browser';
import { render } from '@testing-library/react';
import { renderHook } from '@testing-library/react-hooks';
import * as React from 'react';
diff --git a/packages/react/test/reactrouter-cross-usage.test.tsx b/packages/react/test/reactrouter-cross-usage.test.tsx
index c158f831c381..a5c797af8bc9 100644
--- a/packages/react/test/reactrouter-cross-usage.test.tsx
+++ b/packages/react/test/reactrouter-cross-usage.test.tsx
@@ -63,7 +63,7 @@ vi.mock('@sentry/browser', async requireActual => {
};
});
-vi.mock('@sentry/core', async requireActual => {
+async function coreMock(requireActual: () => Promise<any>) {
const actual = (await requireActual()) as any;
return {
...actual,
@@ -81,7 +81,10 @@ vi.mock('@sentry/core', async requireActual => {
return span;
},
};
-});
+}
+
+vi.mock('@sentry/core', coreMock);
+vi.mock('@sentry/core/browser', coreMock);
describe('React Router cross usage of wrappers', () => {
function createMockBrowserClient(): BrowserClient {
diff --git a/packages/react/test/reactrouter-descendant-routes.test.tsx b/packages/react/test/reactrouter-descendant-routes.test.tsx
index a08893694a30..1172ee8d5d3e 100644
--- a/packages/react/test/reactrouter-descendant-routes.test.tsx
+++ b/packages/react/test/reactrouter-descendant-routes.test.tsx
@@ -58,14 +58,17 @@ vi.mock('@sentry/browser', async requireActual => {
};
});
-vi.mock('@sentry/core', async requireActual => {
+async function coreMock(requireActual: () => Promise<any>) {
return {
...(await requireActual()),
getRootSpan: () => {
return mockRootSpan;
},
};
-});
+}
+
+vi.mock('@sentry/core', coreMock);
+vi.mock('@sentry/core/browser', coreMock);
describe('React Router Descendant Routes', () => {
function createMockBrowserClient(): BrowserClient {
diff --git a/packages/react/test/reactrouterv6.test.tsx b/packages/react/test/reactrouterv6.test.tsx
index fda5043d2e6a..d439e76697d7 100644
--- a/packages/react/test/reactrouterv6.test.tsx
+++ b/packages/react/test/reactrouterv6.test.tsx
@@ -62,14 +62,17 @@ vi.mock('@sentry/browser', async requireActual => {
};
});
-vi.mock('@sentry/core', async requireActual => {
+async function coreMock(requireActual: () => Promise<any>) {
return {
...(await requireActual()),
getRootSpan: () => {
return mockRootSpan;
},
};
-});
+}
+
+vi.mock('@sentry/core', coreMock);
+vi.mock('@sentry/core/browser', coreMock);
describe('reactRouterV6BrowserTracingIntegration', () => {
function createMockBrowserClient(): BrowserClient {
diff --git a/packages/react/test/redux.test.ts b/packages/react/test/redux.test.ts
index 60411418ca6a..09bcfd6ec9c2 100644
--- a/packages/react/test/redux.test.ts
+++ b/packages/react/test/redux.test.ts
@@ -1,5 +1,5 @@
import * as Sentry from '@sentry/browser';
-import * as SentryCore from '@sentry/core';
+import * as SentryCore from '@sentry/core/browser';
import * as Redux from 'redux';
import type { Mock } from 'vitest';
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
@@ -8,7 +8,7 @@ import { createReduxEnhancer } from '../src/redux';
const mockSetContext = vi.fn();
const mockGlobalScopeAddEventProcessor = vi.fn();
-vi.mock('@sentry/core', async requireActual => ({
+vi.mock('@sentry/core/browser', async requireActual => ({
...(await requireActual()),
getCurrentScope() {
return {
diff --git a/packages/react/tsconfig.json b/packages/react/tsconfig.json
index 41ff3c42258e..1b28c0b04eb9 100644
--- a/packages/react/tsconfig.json
+++ b/packages/react/tsconfig.json
@@ -5,6 +5,8 @@
"compilerOptions": {
"lib": ["DOM", "es2020"],
+ "module": "esnext",
+ "moduleResolution": "bundler",
// package-specific options
"esModuleInterop": true,
"jsx": "react"
diff --git a/packages/remix/tsconfig.json b/packages/remix/tsconfig.json
index f1f9d9ccc513..a1752fde9b92 100644
--- a/packages/remix/tsconfig.json
+++ b/packages/remix/tsconfig.json
@@ -5,6 +5,7 @@
"compilerOptions": {
"jsx": "react",
- "module": "es2020"
+ "module": "esnext",
+ "moduleResolution": "bundler"
}
}
diff --git a/packages/replay-canvas/package.json b/packages/replay-canvas/package.json
index 24f77167c532..fe38f47fe2fa 100644
--- a/packages/replay-canvas/package.json
+++ b/packages/replay-canvas/package.json
@@ -65,7 +65,7 @@
},
"homepage": "https://docs.sentry.io/platforms/javascript/session-replay/",
"devDependencies": {
- "@sentry-internal/rrweb": "2.40.0"
+ "@sentry-internal/rrweb": "2.42.0"
},
"dependencies": {
"@sentry-internal/replay": "10.52.0",
diff --git a/packages/replay-internal/package.json b/packages/replay-internal/package.json
index 721ed34acb91..12326474494f 100644
--- a/packages/replay-internal/package.json
+++ b/packages/replay-internal/package.json
@@ -79,8 +79,8 @@
"devDependencies": {
"@babel/core": "^7.27.7",
"@sentry-internal/replay-worker": "10.52.0",
- "@sentry-internal/rrweb": "2.40.0",
- "@sentry-internal/rrweb-snapshot": "2.40.0",
+ "@sentry-internal/rrweb": "2.42.0",
+ "@sentry-internal/rrweb-snapshot": "2.42.0",
"fflate": "0.8.2",
"jest-matcher-utils": "^29.0.0",
"jsdom-worker": "^0.3.0"
diff --git a/packages/replay-internal/src/coreHandlers/handleNetworkBreadcrumbs.ts b/packages/replay-internal/src/coreHandlers/handleNetworkBreadcrumbs.ts
index 6a2e49bfa5b9..29dc5ffd9721 100644
--- a/packages/replay-internal/src/coreHandlers/handleNetworkBreadcrumbs.ts
+++ b/packages/replay-internal/src/coreHandlers/handleNetworkBreadcrumbs.ts
@@ -95,6 +95,6 @@ function _isXhrHint(hint?: BreadcrumbHint): hint is XhrHint {
return hint?.xhr;
}
-function _isFetchHint(hint?: BreadcrumbHint): hint is FetchHint {
- return hint?.response;
+function _isFetchHint(hint?: BreadcrumbHint): hint is Partial<FetchHint> {
+ return hint?.input !== undefined;
}
diff --git a/packages/replay-internal/test/unit/coreHandlers/handleNetworkBreadcrumbs.test.ts b/packages/replay-internal/test/unit/coreHandlers/handleNetworkBreadcrumbs.test.ts
index b88b4d1a2358..ddc5a8d7ff6d 100644
--- a/packages/replay-internal/test/unit/coreHandlers/handleNetworkBreadcrumbs.test.ts
+++ b/packages/replay-internal/test/unit/coreHandlers/handleNetworkBreadcrumbs.test.ts
@@ -359,6 +359,56 @@ other-header: test`;
]);
});
+ it('handles fetch breadcrumb for aborted request (no response)', async () => {
+ const breadcrumb: Breadcrumb = {
+ category: 'fetch',
+ level: 'error',
+ data: {
+ method: 'GET',
+ url: 'https://example.com',
+ },
+ };
+
+ const hint: FetchBreadcrumbHint = {
+ data: new Error('The operation was aborted'),
+ input: ['GET', {}],
+ startTimestamp: BASE_TIMESTAMP + 1000,
+ endTimestamp: BASE_TIMESTAMP + 2000,
+ };
+ beforeAddNetworkBreadcrumb(options, breadcrumb, hint);
+
+ expect(breadcrumb).toEqual({
+ category: 'fetch',
+ level: 'error',
+ data: {
+ method: 'GET',
+ url: 'https://example.com',
+ },
+ });
+
+ await waitForReplayEventBuffer();
+
+ expect((options.replay.eventBuffer as EventBufferArray).events).toEqual([
+ {
+ type: 5,
+ timestamp: (BASE_TIMESTAMP + 1000) / 1000,
+ data: {
+ tag: 'performanceSpan',
+ payload: {
+ data: {
+ method: 'GET',
+ statusCode: 0,
+ },
+ description: 'https://example.com',
+ endTimestamp: (BASE_TIMESTAMP + 2000) / 1000,
+ op: 'resource.fetch',
+ startTimestamp: (BASE_TIMESTAMP + 1000) / 1000,
+ },
+ },
+ },
+ ]);
+ });
+
it('parses fetch response body if necessary', async () => {
const breadcrumb: Breadcrumb = {
category: 'fetch',
diff --git a/packages/solidstart/package.json b/packages/solidstart/package.json
index f7be14121d07..07cab4191866 100644
--- a/packages/solidstart/package.json
+++ b/packages/solidstart/package.json
@@ -69,7 +69,7 @@
"@sentry/core": "10.52.0",
"@sentry/node": "10.52.0",
"@sentry/solid": "10.52.0",
- "@sentry/vite-plugin": "^5.2.0"
+ "@sentry/vite-plugin": "^5.3.0"
},
"devDependencies": {
"@solidjs/router": "^0.15.0",
diff --git a/packages/solidstart/test/server/errorboundary.test.tsx b/packages/solidstart/test/server/errorboundary.test.tsx
index 3ed39bbfea13..7d10e3a14e1e 100644
--- a/packages/solidstart/test/server/errorboundary.test.tsx
+++ b/packages/solidstart/test/server/errorboundary.test.tsx
@@ -1,5 +1,7 @@
/* eslint-disable @typescript-eslint/unbound-method */
+import type * as SentryCoreBrowser from '@sentry/core/browser';
import type * as SentryCore from '@sentry/core';
+
import { createTransport, getCurrentScope, setCurrentClient } from '@sentry/core';
import { render } from '@solidjs/testing-library';
import userEvent from '@testing-library/user-event';
@@ -7,14 +9,17 @@ import { ErrorBoundary } from 'solid-js';
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import { NodeClient, withSentryErrorBoundary } from '../../src/server';
+// mock both old combined @sentry/core and @sentry/core/browser to be safe.
const mockCaptureException = vi.fn();
-vi.mock('@sentry/core', async () => {
- const actual = await vi.importActual('@sentry/core');
+async function mockCore(importActual: () => Promise<any>) {
+ const actual = await importActual();
return {
...actual,
captureException: (...args) => mockCaptureException(...args),
} as typeof SentryCore;
-});
+}
+vi.mock('@sentry/core/browser', mockCore);
+vi.mock('@sentry/core', mockCore);
const user = userEvent.setup();
const SentryErrorBoundary = withSentryErrorBoundary(ErrorBoundary);
diff --git a/packages/solidstart/tsconfig.json b/packages/solidstart/tsconfig.json
index b0eb9ecb6476..78a97ddc222b 100644
--- a/packages/solidstart/tsconfig.json
+++ b/packages/solidstart/tsconfig.json
@@ -3,5 +3,8 @@
"include": ["src/**/*"],
- "compilerOptions": {}
+ "compilerOptions": {
+ "module": "esnext",
+ "moduleResolution": "bundler"
+ }
}
diff --git a/packages/sveltekit/package.json b/packages/sveltekit/package.json
index 829737f6ad40..e63e43608911 100644
--- a/packages/sveltekit/package.json
+++ b/packages/sveltekit/package.json
@@ -51,7 +51,7 @@
"@sentry/core": "10.52.0",
"@sentry/node": "10.52.0",
"@sentry/svelte": "10.52.0",
- "@sentry/vite-plugin": "^5.2.0",
+ "@sentry/vite-plugin": "^5.3.0",
"@sveltejs/acorn-typescript": "^1.0.9",
"acorn": "^8.14.0",
"magic-string": "~0.30.0",
diff --git a/packages/sveltekit/src/server-common/handle.ts b/packages/sveltekit/src/server-common/handle.ts
index 26872a0f6f24..c0e33c49c031 100644
--- a/packages/sveltekit/src/server-common/handle.ts
+++ b/packages/sveltekit/src/server-common/handle.ts
@@ -128,7 +128,6 @@ async function instrumentHandle(
// to avoid doing the dynamic import on every request
if (options.injectFetchProxyScript == null) {
try {
- // @ts-expect-error - the dynamic import is fine here
const { VERSION } = await import('@sveltejs/kit');
options.injectFetchProxyScript = isFetchProxyRequired(VERSION);
} catch {
diff --git a/packages/sveltekit/src/vite/svelteConfig.ts b/packages/sveltekit/src/vite/svelteConfig.ts
index ae0a29a25243..b8f439092bee 100644
--- a/packages/sveltekit/src/vite/svelteConfig.ts
+++ b/packages/sveltekit/src/vite/svelteConfig.ts
@@ -39,7 +39,6 @@ export async function loadSvelteConfig(): Promise {
expect(result).toBeNull();
});
+ it('passes applicationKey through to vite plugin options', () => {
+ const originalEnv = process.env.NODE_ENV;
+ process.env.NODE_ENV = 'production';
+
+ const options: SentrySvelteKitPluginOptions = {
+ autoUploadSourceMaps: true,
+ applicationKey: 'my-app-key',
+ };
+ const result = generateVitePluginOptions(options);
+ expect(result).toEqual(expect.objectContaining({ applicationKey: 'my-app-key' }));
+
+ process.env.NODE_ENV = originalEnv;
+ });
+
it('uses default `debug` value if only default options are provided', () => {
const originalEnv = process.env.NODE_ENV;
process.env.NODE_ENV = 'production'; // Ensure we're not in development mode
diff --git a/packages/sveltekit/tsconfig.json b/packages/sveltekit/tsconfig.json
index bf45a09f2d71..0b788fab5eae 100644
--- a/packages/sveltekit/tsconfig.json
+++ b/packages/sveltekit/tsconfig.json
@@ -4,6 +4,8 @@
"include": ["src/**/*"],
"compilerOptions": {
+ "module": "esnext",
+ "moduleResolution": "bundler"
// package-specific options
}
}
diff --git a/packages/tanstackstart-react/package.json b/packages/tanstackstart-react/package.json
index b53254b5d728..a6dd0076d920 100644
--- a/packages/tanstackstart-react/package.json
+++ b/packages/tanstackstart-react/package.json
@@ -69,7 +69,7 @@
"@sentry/core": "10.52.0",
"@sentry/node": "10.52.0",
"@sentry/react": "10.52.0",
- "@sentry/vite-plugin": "^5.2.0"
+ "@sentry/vite-plugin": "^5.3.0"
},
"devDependencies": {
"vite": "^5.4.11"
diff --git a/packages/tanstackstart-react/src/vite/sourceMaps.ts b/packages/tanstackstart-react/src/vite/sourceMaps.ts
index 296e8582cde8..38c4e8750bd6 100644
--- a/packages/tanstackstart-react/src/vite/sourceMaps.ts
+++ b/packages/tanstackstart-react/src/vite/sourceMaps.ts
@@ -9,6 +9,7 @@ type FilesToDeleteAfterUpload = string | string[] | undefined;
*/
export function makeAddSentryVitePlugin(options: BuildTimeOptionsBase): Plugin[] {
const {
+ applicationKey,
authToken,
bundleSizeOptimizations,
debug,
@@ -52,6 +53,7 @@ export function makeAddSentryVitePlugin(options: BuildTimeOptionsBase): Plugin[]
};
const sentryPlugins = sentryVitePlugin({
+ applicationKey,
authToken: authToken ?? process.env.SENTRY_AUTH_TOKEN,
bundleSizeOptimizations: bundleSizeOptimizations ?? undefined,
debug: debug ?? false,
diff --git a/packages/tanstackstart-react/test/vite/sourceMaps.test.ts b/packages/tanstackstart-react/test/vite/sourceMaps.test.ts
index 58567f085b72..672a94892a58 100644
--- a/packages/tanstackstart-react/test/vite/sourceMaps.test.ts
+++ b/packages/tanstackstart-react/test/vite/sourceMaps.test.ts
@@ -98,6 +98,18 @@ describe('makeAddSentryVitePlugin()', () => {
);
});
+ it('passes applicationKey to sentryVitePlugin', () => {
+ makeAddSentryVitePlugin({
+ applicationKey: 'my-app-key',
+ });
+
+ expect(sentryVitePluginSpy).toHaveBeenCalledWith(
+ expect.objectContaining({
+ applicationKey: 'my-app-key',
+ }),
+ );
+ });
+
it('returns Sentry Vite plugins and config plugin', () => {
const plugins = makeAddSentryVitePlugin({
org: 'my-org',
diff --git a/packages/tanstackstart-react/tsconfig.json b/packages/tanstackstart-react/tsconfig.json
index 9399ef75ead6..c0a6a184ec03 100644
--- a/packages/tanstackstart-react/tsconfig.json
+++ b/packages/tanstackstart-react/tsconfig.json
@@ -1,5 +1,8 @@
{
"extends": "../../tsconfig.json",
"include": ["src/**/*"],
- "compilerOptions": {}
+ "compilerOptions": {
+ "module": "esnext",
+ "moduleResolution": "bundler"
+ }
}
diff --git a/packages/typescript/tsconfig.json b/packages/typescript/tsconfig.json
index 8dd1e93e2c6e..2e70d1a0c493 100644
--- a/packages/typescript/tsconfig.json
+++ b/packages/typescript/tsconfig.json
@@ -1,6 +1,5 @@
{
"compilerOptions": {
- "alwaysStrict": false,
"declaration": true,
"declarationMap": true,
"downlevelIteration": true,
diff --git a/packages/vue/tsconfig.json b/packages/vue/tsconfig.json
index fd54f069790c..9c9351f7b801 100644
--- a/packages/vue/tsconfig.json
+++ b/packages/vue/tsconfig.json
@@ -4,6 +4,8 @@
"include": ["src/**/*"],
"compilerOptions": {
+ "module": "esnext",
+ "moduleResolution": "bundler",
"lib": ["DOM", "es2020"]
}
}
diff --git a/scripts/pr-review-reminder.mjs b/scripts/pr-review-reminder.mjs
index aba64c62512e..783508571ade 100644
--- a/scripts/pr-review-reminder.mjs
+++ b/scripts/pr-review-reminder.mjs
@@ -8,7 +8,7 @@
* @mentions are narrowed as follows:
* - Individual users: not [outside collaborators](https://docs.github.com/en/organizations/managing-outside-collaborators)
* on this repo (via `repos.listCollaborators` with `affiliation: outside` — repo-scoped, no extra token).
- * - Team reviewers: only the org team `team-javascript-sdks` (by slug).
+ * - Team reviewers: only org teams in `SDK_TEAM_SLUGS` (by slug).
*
* Business days exclude weekends and a small set of recurring public holidays
* (same calendar date each year) for US, CA, and AT.
@@ -21,8 +21,13 @@
* await run({ github, context, core });
*/
-// Team @mentions only for this slug. Individuals are filtered using outside-collaborator list (see below).
-const SDK_TEAM_SLUG = 'team-javascript-sdks';
+// Team @mentions only for these slugs. Individuals are filtered using outside-collaborator list (see below).
+const SDK_TEAM_SLUGS = new Set([
+ 'team-javascript-sdks',
+ 'team-javascript-sdks-framework',
+ 'team-javascript-sdks-browser',
+ 'team-javascript-sdks-server',
+]);
// ---------------------------------------------------------------------------
// Outside collaborators (repo API — works with default GITHUB_TOKEN).
@@ -254,7 +259,7 @@ export default async function run({ github, context, core }) {
// Collect overdue team reviewers
for (const team of pendingTeams) {
- if (team.slug !== SDK_TEAM_SLUG) {
+ if (!SDK_TEAM_SLUGS.has(team.slug)) {
continue;
}
diff --git a/yarn.lock b/yarn.lock
index 5876796341f3..06a319881032 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -1702,13 +1702,6 @@
resolved "https://registry.yarnpkg.com/@babel/helper-globals/-/helper-globals-7.28.0.tgz#b9430df2aa4e17bc28665eadeae8aa1d985e6674"
integrity sha512-+W6cISkXFa1jXsDEdYA8HeevQT/FULhxzR99pxphltZcVaugps53THCeiWA8SguxxpSp3gKPiuYfSWopkLQ4hw==
-"@babel/helper-hoist-variables@^7.22.5":
- version "7.24.7"
- resolved "https://registry.yarnpkg.com/@babel/helper-hoist-variables/-/helper-hoist-variables-7.24.7.tgz#b4ede1cde2fd89436397f30dc9376ee06b0f25ee"
- integrity sha512-MJJwhkoGy5c4ehfoRyrJ/owKeMl19U54h27YYftT0o2teQ3FJ3nQUf/I3LlJsX4l3qlw7WRXUmiyajvHXoTubQ==
- dependencies:
- "@babel/types" "^7.24.7"
-
"@babel/helper-member-expression-to-functions@^7.28.5":
version "7.28.5"
resolved "https://registry.yarnpkg.com/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.28.5.tgz#f3e07a10be37ed7a63461c63e6929575945a6150"
@@ -1791,7 +1784,7 @@
resolved "https://registry.yarnpkg.com/@babel/helper-string-parser/-/helper-string-parser-7.27.1.tgz#54da796097ab19ce67ed9f88b47bb2ec49367687"
integrity sha512-qMlSxKbpRlAridDExk92nSobyDdpPijUq2DW6oDnUqd0iOGxmQjyqhMIihI9+zv4LPyZdRje2cavWPbCbWm3eA==
-"@babel/helper-validator-identifier@^7.22.20", "@babel/helper-validator-identifier@^7.25.9", "@babel/helper-validator-identifier@^7.28.5":
+"@babel/helper-validator-identifier@^7.25.9", "@babel/helper-validator-identifier@^7.28.5":
version "7.28.5"
resolved "https://registry.yarnpkg.com/@babel/helper-validator-identifier/-/helper-validator-identifier-7.28.5.tgz#010b6938fab7cb7df74aa2bbc06aa503b8fe5fb4"
integrity sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==
@@ -2356,14 +2349,14 @@
"@babel/helper-plugin-utils" "^7.28.6"
"@babel/plugin-transform-modules-systemjs@^7.18.9", "@babel/plugin-transform-modules-systemjs@^7.24.1":
- version "7.24.1"
- resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.24.1.tgz#2b9625a3d4e445babac9788daec39094e6b11e3e"
- integrity sha512-mqQ3Zh9vFO1Tpmlt8QPnbwGHzNz3lpNEMxQb1kAemn/erstyqw1r9KeOlOfo3y6xAnFEcOv2tSyrXfmMk+/YZA==
+ version "7.29.4"
+ resolved "https://registry.yarnpkg.com/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.29.4.tgz#f621105da99919c15cf4bde6fcc7346ef95e7b20"
+ integrity sha512-N7QmZ0xRZfjHOfZeQLJjwgX2zS9pdGHSVl/cjSGlo4dXMqvurfxXDMKY4RqEKzPozV78VMcd0lxyG13mlbKc4w==
dependencies:
- "@babel/helper-hoist-variables" "^7.22.5"
- "@babel/helper-module-transforms" "^7.23.3"
- "@babel/helper-plugin-utils" "^7.24.0"
- "@babel/helper-validator-identifier" "^7.22.20"
+ "@babel/helper-module-transforms" "^7.28.6"
+ "@babel/helper-plugin-utils" "^7.28.6"
+ "@babel/helper-validator-identifier" "^7.28.5"
+ "@babel/traverse" "^7.29.0"
"@babel/plugin-transform-modules-umd@^7.18.6", "@babel/plugin-transform-modules-umd@^7.24.1":
version "7.24.1"
@@ -7135,6 +7128,11 @@
resolved "https://registry.yarnpkg.com/@redis/bloom/-/bloom-1.2.0.tgz#d3fd6d3c0af3ef92f26767b56414a370c7b63b71"
integrity sha512-HG2DFjYKbpNmVXsa0keLHp/3leGJz1mjh09f2RLGGLQZzSHpkmZWuwJbAvo3QcRY8p80m5+ZdXZdYOSBLlp7Cg==
+"@redis/bloom@5.12.1":
+ version "5.12.1"
+ resolved "https://registry.yarnpkg.com/@redis/bloom/-/bloom-5.12.1.tgz#047dbfce93cfa7e5879fb58ad1b2afe87ebb2dc4"
+ integrity sha512-PUUfv+ms7jgPSBVoo/DN4AkPHj4D5TZSd6SbJX7egzBplkYUcKmHRE8RKia7UtZ8bSQbLguLvxVO+asKtQfZWA==
+
"@redis/client@1.5.16":
version "1.5.16"
resolved "https://registry.yarnpkg.com/@redis/client/-/client-1.5.16.tgz#1d5919077a06a4b935b0e4bef9e036eef1a10371"
@@ -7144,6 +7142,13 @@
generic-pool "3.9.0"
yallist "4.0.0"
+"@redis/client@5.12.1":
+ version "5.12.1"
+ resolved "https://registry.yarnpkg.com/@redis/client/-/client-5.12.1.tgz#a35a2bac546c727d7915d2d91b63a77111e51ebd"
+ integrity sha512-7aPGWeqA3uFm43o19umzdl16CEjK/JQGtSXVPevplTaOU3VJA/rseBC1QvYUz9lLDIMBimc4SW/zrW4S89BaCA==
+ dependencies:
+ cluster-key-slot "1.1.2"
+
"@redis/graph@1.1.1":
version "1.1.1"
resolved "https://registry.yarnpkg.com/@redis/graph/-/graph-1.1.1.tgz#8c10df2df7f7d02741866751764031a957a170ea"
@@ -7154,16 +7159,31 @@
resolved "https://registry.yarnpkg.com/@redis/json/-/json-1.0.6.tgz#b7a7725bbb907765d84c99d55eac3fcf772e180e"
integrity sha512-rcZO3bfQbm2zPRpqo82XbW8zg4G/w4W3tI7X8Mqleq9goQjAGLL7q/1n1ZX4dXEAmORVZ4s1+uKLaUOg7LrUhw==
+"@redis/json@5.12.1":
+ version "5.12.1"
+ resolved "https://registry.yarnpkg.com/@redis/json/-/json-5.12.1.tgz#52aff987abe4d41ec9644857fb00f16c7a97fdb7"
+ integrity sha512-eOze75esLve4vfqDel7aMX08CNaiLLQS2fV8mpRN9NxPe1rVR4vQyYiW/OgtGUysF6QOr9ANhfxABKNOJfXdKg==
+
"@redis/search@1.1.6":
version "1.1.6"
resolved "https://registry.yarnpkg.com/@redis/search/-/search-1.1.6.tgz#33bcdd791d9ed88ab6910243a355d85a7fedf756"
integrity sha512-mZXCxbTYKBQ3M2lZnEddwEAks0Kc7nauire8q20oA0oA/LoA+E/b5Y5KZn232ztPb1FkIGqo12vh3Lf+Vw5iTw==
+"@redis/search@5.12.1":
+ version "5.12.1"
+ resolved "https://registry.yarnpkg.com/@redis/search/-/search-5.12.1.tgz#b7a738be918c8a7af91e39c5bd2023f30f392981"
+ integrity sha512-ItlxbxC9cKI6IU1TLWoczwJCRb6TdmkEpWv05UrPawqaAnWGRu3rcIqsc5vN483T2fSociuyV1UkWIL5I4//2w==
+
"@redis/time-series@1.0.5":
version "1.0.5"
resolved "https://registry.yarnpkg.com/@redis/time-series/-/time-series-1.0.5.tgz#a6d70ef7a0e71e083ea09b967df0a0ed742bc6ad"
integrity sha512-IFjIgTusQym2B5IZJG3XKr5llka7ey84fw/NOYqESP5WUfQs9zz1ww/9+qoz4ka/S6KcGBodzlCeZ5UImKbscg==
+"@redis/time-series@5.12.1":
+ version "5.12.1"
+ resolved "https://registry.yarnpkg.com/@redis/time-series/-/time-series-5.12.1.tgz#15b6deaaf3716bc2633311c0ed18201c9299392d"
+ integrity sha512-c6JL6E3EcZJuNqKFz+KM+l9l5mpcQiKvTwgA3blt5glWJ8hjDk0yeHN3beE/MpqYIQ8UEX44ItQzgkE/gCBELQ==
+
"@remix-run/node-fetch-server@^0.13.0":
version "0.13.0"
resolved "https://registry.yarnpkg.com/@remix-run/node-fetch-server/-/node-fetch-server-0.13.0.tgz#93be9c2e0e6f12512be471501e3a86dda295b178"
@@ -7515,130 +7535,130 @@
estree-walker "^2.0.2"
picomatch "^4.0.2"
-"@rollup/rollup-android-arm-eabi@4.59.0":
- version "4.59.0"
- resolved "https://registry.yarnpkg.com/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.59.0.tgz#a6742c74c7d9d6d604ef8a48f99326b4ecda3d82"
- integrity sha512-upnNBkA6ZH2VKGcBj9Fyl9IGNPULcjXRlg0LLeaioQWueH30p6IXtJEbKAgvyv+mJaMxSm1l6xwDXYjpEMiLMg==
-
-"@rollup/rollup-android-arm64@4.59.0":
- version "4.59.0"
- resolved "https://registry.yarnpkg.com/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.59.0.tgz#97247be098de4df0c11971089fd2edf80a5da8cf"
- integrity sha512-hZ+Zxj3SySm4A/DylsDKZAeVg0mvi++0PYVceVyX7hemkw7OreKdCvW2oQ3T1FMZvCaQXqOTHb8qmBShoqk69Q==
-
-"@rollup/rollup-darwin-arm64@4.59.0":
- version "4.59.0"
- resolved "https://registry.yarnpkg.com/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.59.0.tgz#674852cf14cf11b8056e0b1a2f4e872b523576cf"
- integrity sha512-W2Psnbh1J8ZJw0xKAd8zdNgF9HRLkdWwwdWqubSVk0pUuQkoHnv7rx4GiF9rT4t5DIZGAsConRE3AxCdJ4m8rg==
-
-"@rollup/rollup-darwin-x64@4.59.0":
- version "4.59.0"
- resolved "https://registry.yarnpkg.com/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.59.0.tgz#36dfd7ed0aaf4d9d89d9ef983af72632455b0246"
- integrity sha512-ZW2KkwlS4lwTv7ZVsYDiARfFCnSGhzYPdiOU4IM2fDbL+QGlyAbjgSFuqNRbSthybLbIJ915UtZBtmuLrQAT/w==
-
-"@rollup/rollup-freebsd-arm64@4.59.0":
- version "4.59.0"
- resolved "https://registry.yarnpkg.com/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.59.0.tgz#2f87c2074b4220260fdb52a9996246edfc633c22"
- integrity sha512-EsKaJ5ytAu9jI3lonzn3BgG8iRBjV4LxZexygcQbpiU0wU0ATxhNVEpXKfUa0pS05gTcSDMKpn3Sx+QB9RlTTA==
-
-"@rollup/rollup-freebsd-x64@4.59.0":
- version "4.59.0"
- resolved "https://registry.yarnpkg.com/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.59.0.tgz#9b5a26522a38a95dc06616d1939d4d9a76937803"
- integrity sha512-d3DuZi2KzTMjImrxoHIAODUZYoUUMsuUiY4SRRcJy6NJoZ6iIqWnJu9IScV9jXysyGMVuW+KNzZvBLOcpdl3Vg==
-
-"@rollup/rollup-linux-arm-gnueabihf@4.59.0":
- version "4.59.0"
- resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.59.0.tgz#86aa4859385a8734235b5e40a48e52d770758c3a"
- integrity sha512-t4ONHboXi/3E0rT6OZl1pKbl2Vgxf9vJfWgmUoCEVQVxhW6Cw/c8I6hbbu7DAvgp82RKiH7TpLwxnJeKv2pbsw==
-
-"@rollup/rollup-linux-arm-musleabihf@4.59.0":
- version "4.59.0"
- resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.59.0.tgz#cbe70e56e6ece8dac83eb773b624fc9e5a460976"
- integrity sha512-CikFT7aYPA2ufMD086cVORBYGHffBo4K8MQ4uPS/ZnY54GKj36i196u8U+aDVT2LX4eSMbyHtyOh7D7Zvk2VvA==
-
-"@rollup/rollup-linux-arm64-gnu@4.59.0":
- version "4.59.0"
- resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.59.0.tgz#d14992a2e653bc3263d284bc6579b7a2890e1c45"
- integrity sha512-jYgUGk5aLd1nUb1CtQ8E+t5JhLc9x5WdBKew9ZgAXg7DBk0ZHErLHdXM24rfX+bKrFe+Xp5YuJo54I5HFjGDAA==
-
-"@rollup/rollup-linux-arm64-musl@4.59.0":
- version "4.59.0"
- resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.59.0.tgz#2fdd1ddc434ea90aeaa0851d2044789b4d07f6da"
- integrity sha512-peZRVEdnFWZ5Bh2KeumKG9ty7aCXzzEsHShOZEFiCQlDEepP1dpUl/SrUNXNg13UmZl+gzVDPsiCwnV1uI0RUA==
-
-"@rollup/rollup-linux-loong64-gnu@4.59.0":
- version "4.59.0"
- resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.59.0.tgz#8a181e6f89f969f21666a743cd411416c80099e7"
- integrity sha512-gbUSW/97f7+r4gHy3Jlup8zDG190AuodsWnNiXErp9mT90iCy9NKKU0Xwx5k8VlRAIV2uU9CsMnEFg/xXaOfXg==
-
-"@rollup/rollup-linux-loong64-musl@4.59.0":
- version "4.59.0"
- resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.59.0.tgz#904125af2babc395f8061daa27b5af1f4e3f2f78"
- integrity sha512-yTRONe79E+o0FWFijasoTjtzG9EBedFXJMl888NBEDCDV9I2wGbFFfJQQe63OijbFCUZqxpHz1GzpbtSFikJ4Q==
-
-"@rollup/rollup-linux-ppc64-gnu@4.59.0":
- version "4.59.0"
- resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.59.0.tgz#a57970ac6864c9a3447411a658224bdcf948be22"
- integrity sha512-sw1o3tfyk12k3OEpRddF68a1unZ5VCN7zoTNtSn2KndUE+ea3m3ROOKRCZxEpmT9nsGnogpFP9x6mnLTCaoLkA==
-
-"@rollup/rollup-linux-ppc64-musl@4.59.0":
- version "4.59.0"
- resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.59.0.tgz#bb84de5b26870567a4267666e08891e80bb56a63"
- integrity sha512-+2kLtQ4xT3AiIxkzFVFXfsmlZiG5FXYW7ZyIIvGA7Bdeuh9Z0aN4hVyXS/G1E9bTP/vqszNIN/pUKCk/BTHsKA==
-
-"@rollup/rollup-linux-riscv64-gnu@4.59.0":
- version "4.59.0"
- resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.59.0.tgz#72d00d2c7fb375ce3564e759db33f17a35bffab9"
- integrity sha512-NDYMpsXYJJaj+I7UdwIuHHNxXZ/b/N2hR15NyH3m2qAtb/hHPA4g4SuuvrdxetTdndfj9b1WOmy73kcPRoERUg==
-
-"@rollup/rollup-linux-riscv64-musl@4.59.0":
- version "4.59.0"
- resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.59.0.tgz#4c166ef58e718f9245bd31873384ba15a5c1a883"
- integrity sha512-nLckB8WOqHIf1bhymk+oHxvM9D3tyPndZH8i8+35p/1YiVoVswPid2yLzgX7ZJP0KQvnkhM4H6QZ5m0LzbyIAg==
-
-"@rollup/rollup-linux-s390x-gnu@4.59.0":
- version "4.59.0"
- resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.59.0.tgz#bb5025cde9a61db478c2ca7215808ad3bce73a09"
- integrity sha512-oF87Ie3uAIvORFBpwnCvUzdeYUqi2wY6jRFWJAy1qus/udHFYIkplYRW+wo+GRUP4sKzYdmE1Y3+rY5Gc4ZO+w==
-
-"@rollup/rollup-linux-x64-gnu@4.59.0":
- version "4.59.0"
- resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.59.0.tgz#9b66b1f9cd95c6624c788f021c756269ffed1552"
- integrity sha512-3AHmtQq/ppNuUspKAlvA8HtLybkDflkMuLK4DPo77DfthRb71V84/c4MlWJXixZz4uruIH4uaa07IqoAkG64fg==
-
-"@rollup/rollup-linux-x64-musl@4.59.0":
- version "4.59.0"
- resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.59.0.tgz#b007ca255dc7166017d57d7d2451963f0bd23fd9"
- integrity sha512-2UdiwS/9cTAx7qIUZB/fWtToJwvt0Vbo0zmnYt7ED35KPg13Q0ym1g442THLC7VyI6JfYTP4PiSOWyoMdV2/xg==
-
-"@rollup/rollup-openbsd-x64@4.59.0":
- version "4.59.0"
- resolved "https://registry.yarnpkg.com/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.59.0.tgz#e8b357b2d1aa2c8d76a98f5f0d889eabe93f4ef9"
- integrity sha512-M3bLRAVk6GOwFlPTIxVBSYKUaqfLrn8l0psKinkCFxl4lQvOSz8ZrKDz2gxcBwHFpci0B6rttydI4IpS4IS/jQ==
-
-"@rollup/rollup-openharmony-arm64@4.59.0":
- version "4.59.0"
- resolved "https://registry.yarnpkg.com/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.59.0.tgz#96c2e3f4aacd3d921981329831ff8dde492204dc"
- integrity sha512-tt9KBJqaqp5i5HUZzoafHZX8b5Q2Fe7UjYERADll83O4fGqJ49O1FsL6LpdzVFQcpwvnyd0i+K/VSwu/o/nWlA==
-
-"@rollup/rollup-win32-arm64-msvc@4.59.0":
- version "4.59.0"
- resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.59.0.tgz#2d865149d706d938df8b4b8f117e69a77646d581"
- integrity sha512-V5B6mG7OrGTwnxaNUzZTDTjDS7F75PO1ae6MJYdiMu60sq0CqN5CVeVsbhPxalupvTX8gXVSU9gq+Rx1/hvu6A==
-
-"@rollup/rollup-win32-ia32-msvc@4.59.0":
- version "4.59.0"
- resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.59.0.tgz#abe1593be0fa92325e9971c8da429c5e05b92c36"
- integrity sha512-UKFMHPuM9R0iBegwzKF4y0C4J9u8C6MEJgFuXTBerMk7EJ92GFVFYBfOZaSGLu6COf7FxpQNqhNS4c4icUPqxA==
-
-"@rollup/rollup-win32-x64-gnu@4.59.0":
- version "4.59.0"
- resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.59.0.tgz#c4af3e9518c9a5cd4b1c163dc81d0ad4d82e7eab"
- integrity sha512-laBkYlSS1n2L8fSo1thDNGrCTQMmxjYY5G0WFWjFFYZkKPjsMBsgJfGf4TLxXrF6RyhI60L8TMOjBMvXiTcxeA==
-
-"@rollup/rollup-win32-x64-msvc@4.59.0":
- version "4.59.0"
- resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.59.0.tgz#4584a8a87b29188a4c1fe987a9fcf701e256d86c"
- integrity sha512-2HRCml6OztYXyJXAvdDXPKcawukWY2GpR5/nxKp4iBgiO3wcoEGkAaqctIbZcNB6KlUQBIqt8VYkNSj2397EfA==
+"@rollup/rollup-android-arm-eabi@4.60.3":
+ version "4.60.3"
+ resolved "https://registry.yarnpkg.com/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.60.3.tgz#31503ca40424374cd6c5198031cf4d5a73de9727"
+ integrity sha512-x35CNW/ANXG3hE/EZpRU8MXX1JDN86hBb2wMGAtltkz7pc6cxgjpy1OMMfDosOQ+2hWqIkag/fGok1Yady9nGw==
+
+"@rollup/rollup-android-arm64@4.60.3":
+ version "4.60.3"
+ resolved "https://registry.yarnpkg.com/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.60.3.tgz#7cbc30c88507013d0f982cfeb8884337ba1e0bb2"
+ integrity sha512-xw3xtkDApIOGayehp2+Rz4zimfkaX65r4t47iy+ymQB2G4iJCBBfj0ogVg5jpvjpn8UWn/+q9tprxleYeNp3Hw==
+
+"@rollup/rollup-darwin-arm64@4.60.3":
+ version "4.60.3"
+ resolved "https://registry.yarnpkg.com/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.60.3.tgz#bc341a93bb2111326a2865f55d1d23baedecf40c"
+ integrity sha512-vo6Y5Qfpx7/5EaamIwi0WqW2+zfiusVihKatLvtN1VFVy3D13uERk/6gZLU1UiHRL6fDXqj/ELIeVRGnvcTE1g==
+
+"@rollup/rollup-darwin-x64@4.60.3":
+ version "4.60.3"
+ resolved "https://registry.yarnpkg.com/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.60.3.tgz#dfa0236581c55ecc0bcaeb2ea1f2e800c58dc3e2"
+ integrity sha512-D+0QGcZhBzTN82weOnsSlY7V7+RMmPuF1CkbxyMAGE8+ZHeUjyb76ZiWmBlCu//AQQONvxcqRbwZTajZKqjuOw==
+
+"@rollup/rollup-freebsd-arm64@4.60.3":
+ version "4.60.3"
+ resolved "https://registry.yarnpkg.com/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.60.3.tgz#4c5977413b87808a13b5edd524e46fafddb85b52"
+ integrity sha512-6HnvHCT7fDyj6R0Ph7A6x8dQS/S38MClRWeDLqc0MdfWkxjiu1HSDYrdPhqSILzjTIC/pnXbbJbo+ft+gy/9hQ==
+
+"@rollup/rollup-freebsd-x64@4.60.3":
+ version "4.60.3"
+ resolved "https://registry.yarnpkg.com/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.60.3.tgz#5cb2cee62ffee3ada4a0b44353e96cf98cfc7c3c"
+ integrity sha512-KHLgC3WKlUYW3ShFKnnosZDOJ0xjg9zp7au3sIm2bs/tGBeC2ipmvRh/N7JKi0t9Ue20C0dpEshi8WUubg+cnA==
+
+"@rollup/rollup-linux-arm-gnueabihf@4.60.3":
+ version "4.60.3"
+ resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.60.3.tgz#04700cad36dd43ae81044fe7ee73e925845c4b85"
+ integrity sha512-DV6fJoxEYWJOvaZIsok7KrYl0tPvga5OZ2yvKHNNYyk/2roMLqQAbGhr78EQ5YhHpnhLKJD3S1WFusAkmUuV5g==
+
+"@rollup/rollup-linux-arm-musleabihf@4.60.3":
+ version "4.60.3"
+ resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.60.3.tgz#548ebf3997b3a6dcc7cdd7da813ff0c46000ac0a"
+ integrity sha512-mQKoJAzvuOs6F+TZybQO4GOTSMUu7v0WdxEk24krQ/uUxXoPTtHjuaUuPmFhtBcM4K0ons8nrE3JyhTuCFtT/w==
+
+"@rollup/rollup-linux-arm64-gnu@4.60.3":
+ version "4.60.3"
+ resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.60.3.tgz#0264608f504b33725639ebe93be02c40e71a35c1"
+ integrity sha512-Whjj2qoiJ6+OOJMGptTYazaJvjOJm+iKHpXQM1P3LzGjt7Ff++Tp7nH4N8J/BUA7R9IHfDyx4DJIflifwnbmIA==
+
+"@rollup/rollup-linux-arm64-musl@4.60.3":
+ version "4.60.3"
+ resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.60.3.tgz#147cf4889502cd3b331a800b8ca6741f87873079"
+ integrity sha512-4YTNHKqGng5+yiZt3mg77nmyuCfmNfX4fPmyUapBcIk+BdwSwmCWGXOUxhXbBEkFHtoN5boLj/5NON+u5QC9tg==
+
+"@rollup/rollup-linux-loong64-gnu@4.60.3":
+ version "4.60.3"
+ resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.60.3.tgz#0c27c6b5258dcb3d0290e3bd04ba6277c9d7e541"
+ integrity sha512-SU3kNlhkpI4UqlUc2VXPGK9o886ZsSeGfMAX2ba2b8DKmMXq4AL7KUrkSWVbb7koVqx41Yczx6dx5PNargIrEA==
+
+"@rollup/rollup-linux-loong64-musl@4.60.3":
+ version "4.60.3"
+ resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.60.3.tgz#f0f18075ea0bfa2c992f8e3933b39b6ef91f7799"
+ integrity sha512-6lDLl5h4TXpB1mTf2rQWnAk/LcXrx9vBfu/DT5TIPhvMhRWaZ5MxkIc8u4lJAmBo6klTe1ywXIUHFjylW505sg==
+
+"@rollup/rollup-linux-ppc64-gnu@4.60.3":
+ version "4.60.3"
+ resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.60.3.tgz#149bb5cb8893589ffaa1924b4eac4282e9fa4c69"
+ integrity sha512-BMo8bOw8evlup/8G+cj5xWtPyp93xPdyoSN16Zy90Q2QZ0ZYRhCt6ZJSwbrRzG9HApFabjwj2p25TUPDWrhzqQ==
+
+"@rollup/rollup-linux-ppc64-musl@4.60.3":
+ version "4.60.3"
+ resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.60.3.tgz#200a063e298b05f996917d2aa53de749d54c0ca0"
+ integrity sha512-E0L8X1dZN1/Rph+5VPF6Xj2G7JJvMACVXtamTJIDrVI44Y3K+G8gQaMEAavbqCGTa16InptiVrX6eM6pmJ+7qA==
+
+"@rollup/rollup-linux-riscv64-gnu@4.60.3":
+ version "4.60.3"
+ resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.60.3.tgz#6d6d6eb996197ba86f95f9a6c442bc862f0756d4"
+ integrity sha512-oZJ/WHaVfHUiRAtmTAeo3DcevNsVvH8mbvodjZy7D5QKvCefO371SiKRpxoDcCxB3PTRTLayWBkvmDQKTcX/sw==
+
+"@rollup/rollup-linux-riscv64-musl@4.60.3":
+ version "4.60.3"
+ resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.60.3.tgz#9deb86001785cfcbc761457f50cd7c112fda0df9"
+ integrity sha512-Dhbyh7j9FybM3YaTgaHmVALwA8AkUwTPccyCQ79TG9AJUsMQqgN1DDEZNr4+QUfwiWvLDumW5vdwzoeUF+TNxQ==
+
+"@rollup/rollup-linux-s390x-gnu@4.60.3":
+ version "4.60.3"
+ resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.60.3.tgz#d8228720c6e42da190d96c31a3495d70cf8284b9"
+ integrity sha512-cJd1X5XhHHlltkaypz1UcWLA8AcoIi1aWhsvaWDskD1oz2eKCypnqvTQ8ykMNI0RSmm7NkTdSqSSD7zM0xa6Ig==
+
+"@rollup/rollup-linux-x64-gnu@4.60.3":
+ version "4.60.3"
+ resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.60.3.tgz#df6bb38617a66a842bd2aeac9560cd729d084258"
+ integrity sha512-DAZDBHQfG2oQuhY7mc6I3/qB4LU2fQCjRvxbDwd/Jdvb9fypP4IJ4qmtu6lNjes6B531AI8cg1aKC2di97bUxA==
+
+"@rollup/rollup-linux-x64-musl@4.60.3":
+ version "4.60.3"
+ resolved "https://registry.yarnpkg.com/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.60.3.tgz#75e3e72849266b4fdd65f2da6c62423051e35636"
+ integrity sha512-cRxsE8c13mZOh3vP+wLDxpQBRrOHDIGOWyDL93Sy0Ga8y515fBcC2pjUfFwUe5T7tqvTvWbCpg1URM/AXdWIXA==
+
+"@rollup/rollup-openbsd-x64@4.60.3":
+ version "4.60.3"
+ resolved "https://registry.yarnpkg.com/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.60.3.tgz#e1080f0efb8b15cda39b3e62de5fb806079ab6e9"
+ integrity sha512-QaWcIgRxqEdQdhJqW4DJctsH6HCmo5vHxY0krHSX4jMtOqfzC+dqDGuHM87bu4H8JBeibWx7jFz+h6/4C8wA5Q==
+
+"@rollup/rollup-openharmony-arm64@4.60.3":
+ version "4.60.3"
+ resolved "https://registry.yarnpkg.com/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.60.3.tgz#1fbda2d95c29dbfceb62785431754cd5aab86c72"
+ integrity sha512-AaXwSvUi3QIPtroAUw1t5yHGIyqKEXwH54WUocFolZhpGDruJcs8c+xPNDRn4XiQsS7MEwnYsHW2l0MBLDMkWg==
+
+"@rollup/rollup-win32-arm64-msvc@4.60.3":
+ version "4.60.3"
+ resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.60.3.tgz#deab3470815f97996f1d0d3608549cf1b7e4ffc2"
+ integrity sha512-65LAKM/bAWDqKNEelHlcHvm2V+Vfb8C6INFxQXRHCvaVN1rJfwr4NvdP4FyzUaLqWfaCGaadf6UbTm8xJeYfEg==
+
+"@rollup/rollup-win32-ia32-msvc@4.60.3":
+ version "4.60.3"
+ resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.60.3.tgz#817acae2ed4572960b59235ff2322381b6d82f26"
+ integrity sha512-EEM2gyhBF5MFnI6vMKdX1LAosE627RGBzIoGMdLloPZkXrUN0Ckqgr2Qi8+J3zip/8NVVro3/FjB+tjhZUgUHA==
+
+"@rollup/rollup-win32-x64-gnu@4.60.3":
+ version "4.60.3"
+ resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.60.3.tgz#48129be99b0250d76b9c6d0ac983bef563a1c48a"
+ integrity sha512-E5Eb5H/DpxaoXH++Qkv28RcUJboMopmdDUALBczvHMf7hNIxaDZqwY5lK12UK1BHacSmvupoEWGu+n993Z0y1A==
+
+"@rollup/rollup-win32-x64-msvc@4.60.3":
+ version "4.60.3"
+ resolved "https://registry.yarnpkg.com/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.60.3.tgz#cc6f094a3ffe5556bb4a831ee6fb572b8cd81a75"
+ integrity sha512-hPt/bgL5cE+Qp+/TPHBqptcAgPzgj46mPcg/16zNUmbQk0j+mOEQV/+Lqu8QRtDV3Ek95Q6FeFITpuhl6OTsAA==
"@rtsao/scc@^1.1.0":
version "1.1.0"
@@ -7659,102 +7679,68 @@
resolved "https://registry.yarnpkg.com/@sec-ant/readable-stream/-/readable-stream-0.4.1.tgz#60de891bb126abfdc5410fdc6166aca065f10a0c"
integrity sha512-831qok9r2t8AlxLko40y2ebgSDhenenCatLVeW/uBtnHPyhHOvG0C7TvfgecV+wHzIm5KUICgzmVpWS+IMEAeg==
-"@sentry-internal/node-cpu-profiler@^2.2.0":
- version "2.2.0"
- resolved "https://registry.yarnpkg.com/@sentry-internal/node-cpu-profiler/-/node-cpu-profiler-2.2.0.tgz#0640d4aebb4d36031658ccff83dc22b76f437ede"
- integrity sha512-oLHVYurqZfADPh5hvmQYS5qx8t0UZzT2u6+/68VXsFruQEOnYJTODKgU3BVLmemRs3WE6kCJjPeFdHVYOQGSzQ==
+"@sentry-internal/node-cpu-profiler@^2.4.0":
+ version "2.4.0"
+ resolved "https://registry.yarnpkg.com/@sentry-internal/node-cpu-profiler/-/node-cpu-profiler-2.4.0.tgz#3f14cb7c3637b48e87056c2a5787c5e0aa90b987"
+ integrity sha512-zMrbqkd05LS1Ibt+js4R1aMmjdAO0yi9xiywWeulYs/bxN8P5qq20QHYleI76MorsocvYJAFo9GkYfzyzMd6Og==
dependencies:
detect-libc "^2.0.3"
node-abi "^3.73.0"
-"@sentry-internal/node-native-stacktrace@^0.4.0":
- version "0.4.0"
- resolved "https://registry.yarnpkg.com/@sentry-internal/node-native-stacktrace/-/node-native-stacktrace-0.4.0.tgz#8f6e7a21537373a5623714c14d3350e1bb4602f0"
- integrity sha512-cuRBBqnsHOJJqLCii9GvwedzjetsihIarq7TxCjgG88JyF8TZWRMlUBu/OogWhYZVU8uHqAeSvpbzolnmdhdkw==
+"@sentry-internal/node-native-stacktrace@^0.5.0":
+ version "0.5.0"
+ resolved "https://registry.yarnpkg.com/@sentry-internal/node-native-stacktrace/-/node-native-stacktrace-0.5.0.tgz#834a5326fd45a97d1abe3b6110e9b268c7a2fa4e"
+ integrity sha512-vi+yY8D0TgUdpd8ja2BPqm689N+WZPWfXNkx0fzKYlVRGymUpQeyUrz2b6dscYE8Qr3ZiA6sz8RtXeQy1r9ZTQ==
dependencies:
detect-libc "^2.0.4"
node-abi "^3.89.0"
-"@sentry-internal/rrdom@2.34.0":
- version "2.34.0"
- resolved "https://registry.yarnpkg.com/@sentry-internal/rrdom/-/rrdom-2.34.0.tgz#fccc9fe211c3995d4200abafbe8d75b671961ee9"
- integrity sha512-NFGNzI9iGYpJ1D7j8qLu4pFMGDMumQzM9/wMPQpmDQTCZYV25To5lxT7z5K1huPAIyh5NLW+hQlMx/hXxXwJ6w==
+"@sentry-internal/rrdom@2.42.0":
+ version "2.42.0"
+ resolved "https://registry.yarnpkg.com/@sentry-internal/rrdom/-/rrdom-2.42.0.tgz#fc26d88d01edce7580b66f255b8ad65816829aaa"
+ integrity sha512-ecNUqhoDf64dOsGhW4/46AzNWQaAvM+xpruirOWimZE4CsXSWwM558BbIa5qsm9f5pvsnnMHzxQZM0EOf2SZ0g==
dependencies:
- "@sentry-internal/rrweb-snapshot" "2.34.0"
-
-"@sentry-internal/rrdom@2.40.0":
- version "2.40.0"
- resolved "https://registry.yarnpkg.com/@sentry-internal/rrdom/-/rrdom-2.40.0.tgz#2afd76ab6695a63e7b000cb3c101029cd2ccd48b"
- integrity sha512-QBnn2F0qi4Lx7TZW41CdRek/vWWLZCDx1Ywc1SimBX+byuVmNP84qvnVI4wKMoDvU6AcQiWHAgX2tGoa3Ol8pw==
- dependencies:
- "@sentry-internal/rrweb-snapshot" "2.40.0"
-
-"@sentry-internal/rrweb-snapshot@2.34.0":
- version "2.34.0"
- resolved "https://registry.yarnpkg.com/@sentry-internal/rrweb-snapshot/-/rrweb-snapshot-2.34.0.tgz#79c2049b6c887e3c128d5fa80d6f745a61dd0e68"
- integrity sha512-9Tb8jwVufn5GLV0d/CTuoZWo2O06ZB+xWeTJdEkbtJ6PAmO/Q7GQI3uNIx0pfFEnXP+0Km8CKKxpwkEM0z2m6w==
+ "@sentry-internal/rrweb-snapshot" "2.42.0"
-"@sentry-internal/rrweb-snapshot@2.40.0":
- version "2.40.0"
- resolved "https://registry.yarnpkg.com/@sentry-internal/rrweb-snapshot/-/rrweb-snapshot-2.40.0.tgz#873e9be9967d7e4b6518beb7d9dfbdf5bbe043ac"
- integrity sha512-uxYlYUIiybRqcyp5go46G5lcOswTFfeen8PelYVQsiLX34I7eugNfLgFchpBdiWv1FXwsautBWyOsZlxCPc3Zw==
+"@sentry-internal/rrweb-snapshot@2.42.0":
+ version "2.42.0"
+ resolved "https://registry.yarnpkg.com/@sentry-internal/rrweb-snapshot/-/rrweb-snapshot-2.42.0.tgz#fdf3ce47f2c08601f075bdba142d3305d3435455"
+ integrity sha512-LB32c0hxFQbE4mNXWt31k/blPceD+9SkkyGI90mFcL6Mevca6ZEw+YejvgHUt0sM58WPRbpLPXo+U6XFBzVBIw==
-"@sentry-internal/rrweb-types@2.34.0":
- version "2.34.0"
- resolved "https://registry.yarnpkg.com/@sentry-internal/rrweb-types/-/rrweb-types-2.34.0.tgz#32b853d93d1d9a1ae1888b17d84b24e674fadee0"
- integrity sha512-6g5TN8YjqxrZOSQZGMLeZ2XYXdmqaKzPdIzKRySK+rKT/1fJE2gcefJEPDxiix0z/6/v5hGu/Ia8+wbJ7ACHwQ==
+"@sentry-internal/rrweb-types@2.42.0":
+ version "2.42.0"
+ resolved "https://registry.yarnpkg.com/@sentry-internal/rrweb-types/-/rrweb-types-2.42.0.tgz#655c86e7822f8169d108bad1261d8108d3627bbb"
+ integrity sha512-/+mzE1NGd5QaJy1OUqtBszHLTe5KziuKby9ULsonVEnru+0JbuJRiPA+qWLft6MfdyCcfm0Q8GYgy0H85sETbw==
dependencies:
- "@sentry-internal/rrweb-snapshot" "2.34.0"
+ "@sentry-internal/rrweb-snapshot" "2.42.0"
"@types/css-font-loading-module" "0.0.7"
-"@sentry-internal/rrweb-types@2.40.0":
- version "2.40.0"
- resolved "https://registry.yarnpkg.com/@sentry-internal/rrweb-types/-/rrweb-types-2.40.0.tgz#0c16376b83d264548f67e757dc28ae01d2e46991"
- integrity sha512-d4MB1NI7KeomX0vRy0E7OQJHI+WvbeKvwAHqG/xV94A4ZscKkF7DoAQUvyeBsFf3tED/SuwV75HNEnq2uwk/cQ==
+"@sentry-internal/rrweb@2.42.0":
+ version "2.42.0"
+ resolved "https://registry.yarnpkg.com/@sentry-internal/rrweb/-/rrweb-2.42.0.tgz#d66013382a1b887d2f77978cb107cf5830542864"
+ integrity sha512-Rh3Qpt5E6+woQ5aupT0SECUAy0cCi8eyEFVyIGUDJW7lGeX/vRy5Mv75N4uQzy+RELxH5yhwkaOK/H3Ncf0FHw==
dependencies:
- "@sentry-internal/rrweb-snapshot" "2.40.0"
- "@types/css-font-loading-module" "0.0.7"
-
-"@sentry-internal/rrweb@2.34.0":
- version "2.34.0"
- resolved "https://registry.yarnpkg.com/@sentry-internal/rrweb/-/rrweb-2.34.0.tgz#a32945504f1ba2ba60f2ebd7a17d2df5e1aa010d"
- integrity sha512-NAzpnMOvsIV8o6rEvJ7SDs/TwuHXSrRmuAYYedrOQyJoLq00HF+6wQGe6SAyXv/bkumXmQfjyJ6bv4XNtC4S6g==
- dependencies:
- "@sentry-internal/rrdom" "2.34.0"
- "@sentry-internal/rrweb-snapshot" "2.34.0"
- "@sentry-internal/rrweb-types" "2.34.0"
- "@types/css-font-loading-module" "0.0.7"
- "@xstate/fsm" "^1.4.0"
- base64-arraybuffer "^1.0.1"
- fflate "^0.4.4"
- mitt "^3.0.0"
-
-"@sentry-internal/rrweb@2.40.0":
- version "2.40.0"
- resolved "https://registry.yarnpkg.com/@sentry-internal/rrweb/-/rrweb-2.40.0.tgz#9d60899800f21f061f38b1b62ddc64df1ff525bb"
- integrity sha512-niFva5QmCTfavotLvIeFSvO0rfzbJwW04igcPaWAqTDATi+Xife27iBeVMBmjpHEWygGYkBaGyBQUUi8zUdAyg==
- dependencies:
- "@sentry-internal/rrdom" "2.40.0"
- "@sentry-internal/rrweb-snapshot" "2.40.0"
- "@sentry-internal/rrweb-types" "2.40.0"
+ "@sentry-internal/rrdom" "2.42.0"
+ "@sentry-internal/rrweb-snapshot" "2.42.0"
+ "@sentry-internal/rrweb-types" "2.42.0"
"@types/css-font-loading-module" "0.0.7"
"@xstate/fsm" "^1.4.0"
base64-arraybuffer "^1.0.1"
fflate "^0.4.4"
mitt "^3.0.0"
-"@sentry/babel-plugin-component-annotate@5.2.0":
- version "5.2.0"
- resolved "https://registry.yarnpkg.com/@sentry/babel-plugin-component-annotate/-/babel-plugin-component-annotate-5.2.0.tgz#6d6f3c47d7f795f5dfbb9b59abef6ab33e5e7f2d"
- integrity sha512-8LbOI5Kzb5F0+7LVQPi2+zGz1iPiRRFhM+7uZ/ZQ33L9BmDOYNIy3xWxCfMw2JCuMXXaxF47XCjGmR22/B0WPg==
+"@sentry/babel-plugin-component-annotate@5.3.0":
+ version "5.3.0"
+ resolved "https://registry.yarnpkg.com/@sentry/babel-plugin-component-annotate/-/babel-plugin-component-annotate-5.3.0.tgz#356218f747969f9af970987dcf0f17ec81d6e50c"
+ integrity sha512-p4q8gn8wcFqZGP/s2MnJCAAd8fTikaU6A0mM97RDHQgStcrYiaS0Sc5zUNfb1V+UOLPuvdEdL6MwyxfzjYJQTA==
-"@sentry/bundler-plugin-core@5.2.0", "@sentry/bundler-plugin-core@^5.2.0":
- version "5.2.0"
- resolved "https://registry.yarnpkg.com/@sentry/bundler-plugin-core/-/bundler-plugin-core-5.2.0.tgz#805ab7820b23d21ba5267e97db7300df35aede88"
- integrity sha512-+C0x4gEIJRgoMwyRFGx+TFiJ1Po2BZlT1v61+PnouiaprKL5qtZG8n5PXx/5LPLDsVjSIcXjnDrTz9aSm8SJ3w==
+"@sentry/bundler-plugin-core@5.3.0", "@sentry/bundler-plugin-core@^5.3.0":
+ version "5.3.0"
+ resolved "https://registry.yarnpkg.com/@sentry/bundler-plugin-core/-/bundler-plugin-core-5.3.0.tgz#2772866dcb076c36721d2acab1010a6fc0b3ff2f"
+ integrity sha512-L5T60sWdAI3qWwdg3Ptwek/0TY59PERrxyqp4XMUkroayQvGd9r5dIW9Q1kSeXX9iJ442nXbFZKAOyCKV4Z13Q==
dependencies:
"@babel/core" "^7.18.5"
- "@sentry/babel-plugin-component-annotate" "5.2.0"
+ "@sentry/babel-plugin-component-annotate" "5.3.0"
"@sentry/cli" "^2.58.5"
dotenv "^16.3.1"
find-up "^5.0.0"
@@ -7821,28 +7807,28 @@
"@sentry/cli-win32-i686" "2.58.5"
"@sentry/cli-win32-x64" "2.58.5"
-"@sentry/rollup-plugin@5.2.0", "@sentry/rollup-plugin@^5.2.0":
- version "5.2.0"
- resolved "https://registry.yarnpkg.com/@sentry/rollup-plugin/-/rollup-plugin-5.2.0.tgz#41601fa35fdcf9a43cff9807cdca012780d2fd5b"
- integrity sha512-a8LfpvcYMFtFSroro5MpCcOoS528LeLfUHzxWURnpofOnY+Aso9Si4y4dFlna+RKqxCXjmFbn6CLnfI+YrHysQ==
+"@sentry/rollup-plugin@5.3.0", "@sentry/rollup-plugin@^5.3.0":
+ version "5.3.0"
+ resolved "https://registry.yarnpkg.com/@sentry/rollup-plugin/-/rollup-plugin-5.3.0.tgz#1dbfbee8d5d2a0f6acc245f4dd05e46c66291710"
+ integrity sha512-hgPGPYdQJ/G1cGYOxAb7d4z3V+/k/E5/P/5TFPEEBLuIbFFk+JG0CISUDJdzXJjO382Lb99PBJuXGbueBmO79w==
dependencies:
- "@sentry/bundler-plugin-core" "5.2.0"
+ "@sentry/bundler-plugin-core" "5.3.0"
magic-string "~0.30.8"
-"@sentry/vite-plugin@^5.2.0":
- version "5.2.0"
- resolved "https://registry.yarnpkg.com/@sentry/vite-plugin/-/vite-plugin-5.2.0.tgz#eca4c5eebe00696ded98e055f185faf846886f19"
- integrity sha512-4Jo3ixBspso5HY81PDvZdRXkH9wYGVmcw/0a2IX9ejbyKBdHqkYg4IhAtNqGUAyGuHwwRS9Y1S+sCMvrXv6htw==
+"@sentry/vite-plugin@^5.3.0":
+ version "5.3.0"
+ resolved "https://registry.yarnpkg.com/@sentry/vite-plugin/-/vite-plugin-5.3.0.tgz#6cb63bfc1b0d1613a151de456b9c66f6cb21b1d2"
+ integrity sha512-qcoSzo4n2MulVQ70UUPLq6dTleb2a2HwL2wuwvAgWhPChrYTuk6A6mDg6aQb9fairPAwFPiU9PzOANpoDJcz1A==
dependencies:
- "@sentry/bundler-plugin-core" "5.2.0"
- "@sentry/rollup-plugin" "5.2.0"
+ "@sentry/bundler-plugin-core" "5.3.0"
+ "@sentry/rollup-plugin" "5.3.0"
-"@sentry/webpack-plugin@^5.2.0":
- version "5.2.0"
- resolved "https://registry.yarnpkg.com/@sentry/webpack-plugin/-/webpack-plugin-5.2.0.tgz#6d190986198545c4b3046205f99ffcc601e8d936"
- integrity sha512-ssV/uJK3ixf8UHBrNdLBXcnprUwppJNilbFv+19I81KTH4gVwzKXsVTMO91j6lyAXtk2mORwmEFwxZqScFfc7g==
+"@sentry/webpack-plugin@^5.3.0":
+ version "5.3.0"
+ resolved "https://registry.yarnpkg.com/@sentry/webpack-plugin/-/webpack-plugin-5.3.0.tgz#7c28fbc3f4fbfe07c51f63f4a403c9c7a28ca9d7"
+ integrity sha512-i3OQUrS0FZlXLgq57RIKDp+vHHzuvYKPCKewAPXULWKMsBXFGhP6veGRQ+6To/pmZkkXjEX5ofVNDy9C3jEPKQ==
dependencies:
- "@sentry/bundler-plugin-core" "5.2.0"
+ "@sentry/bundler-plugin-core" "5.3.0"
"@shikijs/core@1.29.2":
version "1.29.2"
@@ -17314,9 +17300,9 @@ fast-text-encoding@^1.0.0:
integrity sha512-dtm4QZH9nZtcDt8qJiOH9fcQd1NAgi+K1O2DbE6GG1PPCK/BWfOH3idCTRQ4ImXRUOyopDEgDEnVEE7Y/2Wrig==
fast-uri@^3.0.1:
- version "3.0.6"
- resolved "https://registry.yarnpkg.com/fast-uri/-/fast-uri-3.0.6.tgz#88f130b77cfaea2378d56bf970dea21257a68748"
- integrity sha512-Atfo14OibSv5wAp4VWNsFYE1AchQRTv9cBGWET4pZWHzYshFSS9NQI6I57rdKn9croWVMbYFbLhJ+yJvmZIIHw==
+ version "3.1.2"
+ resolved "https://registry.yarnpkg.com/fast-uri/-/fast-uri-3.1.2.tgz#8af3d4fc9d3e71b11572cc2673b514a7d1a8c8ec"
+ integrity sha512-rVjf7ArG3LTk+FS6Yw81V1DLuZl1bRbNrev6Tmd/9RaroeeRRJhAt7jg/6YFxbvAQXUCavSoZhPPj6oOx+5KjQ==
fast-xml-builder@^1.1.5:
version "1.1.7"
@@ -18871,10 +18857,10 @@ homedir-polyfill@^1.0.1:
dependencies:
parse-passwd "^1.0.0"
-hono@^4.12.14:
- version "4.12.14"
- resolved "https://registry.yarnpkg.com/hono/-/hono-4.12.14.tgz#4777c9512b7c84138e4f09e61e3d2fa305eb1414"
- integrity sha512-am5zfg3yu6sqn5yjKBNqhnTX7Cv+m00ox+7jbaKkrLMRJ4rAdldd1xPd/JzbBWspqaQv6RSTrgFN95EsfhC+7w==
+hono@^4.12.18:
+ version "4.12.18"
+ resolved "https://registry.yarnpkg.com/hono/-/hono-4.12.18.tgz#f6d301938868c3a8bdb639495f4e326a19181505"
+ integrity sha512-RWzP96k/yv0PQfyXnWjs6zot20TqfpfsNXhOnev8d1InAxubW93L11/oNUc3tQqn2G0bSdAOBpX+2uDFHV7kdQ==
hookable@^5.5.3:
version "5.5.3"
@@ -26085,6 +26071,17 @@ redeyed@~1.0.0:
"@redis/search" "1.1.6"
"@redis/time-series" "1.0.5"
+"redis-5@npm:redis@^5.12.0":
+ version "5.12.1"
+ resolved "https://registry.yarnpkg.com/redis/-/redis-5.12.1.tgz#f95297e01eca8b87a109601a2418647e05f16bee"
+ integrity sha512-LDsoVvb/CpoV9EN3FXvgvSHNJWuCIzl9MiO3ppOevuGLpSGJhwfQjpEwfFJcQvNSddHADDdZaWx0HnmMxRXG7g==
+ dependencies:
+ "@redis/bloom" "5.12.1"
+ "@redis/client" "5.12.1"
+ "@redis/json" "5.12.1"
+ "@redis/search" "5.12.1"
+ "@redis/time-series" "5.12.1"
+
redis-errors@^1.0.0, redis-errors@^1.2.0:
version "1.2.0"
resolved "https://registry.yarnpkg.com/redis-errors/-/redis-errors-1.2.0.tgz#eb62d2adb15e4eaf4610c04afe1529384250abad"
@@ -26782,38 +26779,38 @@ rollup@^3.27.1, rollup@^3.28.1:
optionalDependencies:
fsevents "~2.3.2"
-rollup@^4.0.0, rollup@^4.20.0, rollup@^4.34.9, rollup@^4.35.0, rollup@^4.55.1, rollup@^4.59.0:
- version "4.59.0"
- resolved "https://registry.yarnpkg.com/rollup/-/rollup-4.59.0.tgz#cf74edac17c1486f562d728a4d923a694abdf06f"
- integrity sha512-2oMpl67a3zCH9H79LeMcbDhXW/UmWG/y2zuqnF2jQq5uq9TbM9TVyXvA4+t+ne2IIkBdrLpAaRQAvo7YI/Yyeg==
+rollup@^4.20.0, rollup@^4.34.9, rollup@^4.55.1, rollup@^4.60.3:
+ version "4.60.3"
+ resolved "https://registry.yarnpkg.com/rollup/-/rollup-4.60.3.tgz#789258d41d090687d0ca7e80e8583d733711ddd3"
+ integrity sha512-pAQK9HalE84QSm4Po3EmWIZPd3FnjkShVkiMlz1iligWYkWQ7wHYd1PF/T7QZ5TVSD6uSTon5gBVMSM4JfBV+A==
dependencies:
"@types/estree" "1.0.8"
optionalDependencies:
- "@rollup/rollup-android-arm-eabi" "4.59.0"
- "@rollup/rollup-android-arm64" "4.59.0"
- "@rollup/rollup-darwin-arm64" "4.59.0"
- "@rollup/rollup-darwin-x64" "4.59.0"
- "@rollup/rollup-freebsd-arm64" "4.59.0"
- "@rollup/rollup-freebsd-x64" "4.59.0"
- "@rollup/rollup-linux-arm-gnueabihf" "4.59.0"
- "@rollup/rollup-linux-arm-musleabihf" "4.59.0"
- "@rollup/rollup-linux-arm64-gnu" "4.59.0"
- "@rollup/rollup-linux-arm64-musl" "4.59.0"
- "@rollup/rollup-linux-loong64-gnu" "4.59.0"
- "@rollup/rollup-linux-loong64-musl" "4.59.0"
- "@rollup/rollup-linux-ppc64-gnu" "4.59.0"
- "@rollup/rollup-linux-ppc64-musl" "4.59.0"
- "@rollup/rollup-linux-riscv64-gnu" "4.59.0"
- "@rollup/rollup-linux-riscv64-musl" "4.59.0"
- "@rollup/rollup-linux-s390x-gnu" "4.59.0"
- "@rollup/rollup-linux-x64-gnu" "4.59.0"
- "@rollup/rollup-linux-x64-musl" "4.59.0"
- "@rollup/rollup-openbsd-x64" "4.59.0"
- "@rollup/rollup-openharmony-arm64" "4.59.0"
- "@rollup/rollup-win32-arm64-msvc" "4.59.0"
- "@rollup/rollup-win32-ia32-msvc" "4.59.0"
- "@rollup/rollup-win32-x64-gnu" "4.59.0"
- "@rollup/rollup-win32-x64-msvc" "4.59.0"
+ "@rollup/rollup-android-arm-eabi" "4.60.3"
+ "@rollup/rollup-android-arm64" "4.60.3"
+ "@rollup/rollup-darwin-arm64" "4.60.3"
+ "@rollup/rollup-darwin-x64" "4.60.3"
+ "@rollup/rollup-freebsd-arm64" "4.60.3"
+ "@rollup/rollup-freebsd-x64" "4.60.3"
+ "@rollup/rollup-linux-arm-gnueabihf" "4.60.3"
+ "@rollup/rollup-linux-arm-musleabihf" "4.60.3"
+ "@rollup/rollup-linux-arm64-gnu" "4.60.3"
+ "@rollup/rollup-linux-arm64-musl" "4.60.3"
+ "@rollup/rollup-linux-loong64-gnu" "4.60.3"
+ "@rollup/rollup-linux-loong64-musl" "4.60.3"
+ "@rollup/rollup-linux-ppc64-gnu" "4.60.3"
+ "@rollup/rollup-linux-ppc64-musl" "4.60.3"
+ "@rollup/rollup-linux-riscv64-gnu" "4.60.3"
+ "@rollup/rollup-linux-riscv64-musl" "4.60.3"
+ "@rollup/rollup-linux-s390x-gnu" "4.60.3"
+ "@rollup/rollup-linux-x64-gnu" "4.60.3"
+ "@rollup/rollup-linux-x64-musl" "4.60.3"
+ "@rollup/rollup-openbsd-x64" "4.60.3"
+ "@rollup/rollup-openharmony-arm64" "4.60.3"
+ "@rollup/rollup-win32-arm64-msvc" "4.60.3"
+ "@rollup/rollup-win32-ia32-msvc" "4.60.3"
+ "@rollup/rollup-win32-x64-gnu" "4.60.3"
+ "@rollup/rollup-win32-x64-msvc" "4.60.3"
fsevents "~2.3.2"
rou3@^0.7.12:
@@ -28489,7 +28486,6 @@ stylus@0.59.0, stylus@^0.59.0:
sucrase@^3.27.0, sucrase@^3.35.0, sucrase@getsentry/sucrase#es2020-polyfills:
version "3.36.0"
- uid fd682f6129e507c00bb4e6319cc5d6b767e36061
resolved "https://codeload.github.com/getsentry/sucrase/tar.gz/fd682f6129e507c00bb4e6319cc5d6b767e36061"
dependencies:
"@jridgewell/gen-mapping" "^0.3.2"