Skip to content

Commit 62d3436

Browse files
authored
fix(core): Improve Vercel AI SDK instrumentation attributes (#19717)
This PR introduces new attributes and fixes to the Vercel AI SDK instrumentation:
- Adds the new [gen_ai.output.messages](https://getsentry.github.io/sentry-conventions/attributes/gen_ai/#gen_ai-output-messages) attribute, which deprecates [gen_ai.response.text](https://getsentry.github.io/sentry-conventions/attributes/gen_ai/#gen_ai-response-text) and [gen_ai.response.tool_calls](https://getsentry.github.io/sentry-conventions/attributes/gen_ai/#gen_ai-response-tool_calls).
- Adds the new [gen_ai.tool.description](https://getsentry.github.io/sentry-conventions/attributes/gen_ai/#gen_ai-tool-description) attribute.
- Checks for the Vercel AI media type when stripping media out of the input messages.

Closes #19574
1 parent 0d07936 commit 62d3436

File tree

14 files changed

+312
-56
lines changed

14 files changed

+312
-56
lines changed

.size-limit.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -326,7 +326,7 @@ module.exports = [
326326
import: createImport('init'),
327327
ignore: [...builtinModules, ...nodePrefixedBuiltinModules],
328328
gzip: true,
329-
limit: '175 KB',
329+
limit: '176 KB',
330330
},
331331
{
332332
name: '@sentry/node - without tracing',

dev-packages/e2e-tests/test-applications/nextjs-15/tests/ai-test.test.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -34,14 +34,14 @@ test('should create AI spans with correct attributes', async ({ page }) => {
3434
expect(firstPipelineSpan?.data?.['vercel.ai.model.id']).toBe('mock-model-id');
3535
expect(firstPipelineSpan?.data?.['vercel.ai.model.provider']).toBe('mock-provider');
3636
expect(firstPipelineSpan?.data?.['vercel.ai.prompt']).toContain('Where is the first span?');
37-
expect(firstPipelineSpan?.data?.['gen_ai.response.text']).toBe('First span here!');
37+
expect(firstPipelineSpan?.data?.['gen_ai.output.messages']).toContain('First span here!');
3838
expect(firstPipelineSpan?.data?.['gen_ai.usage.input_tokens']).toBe(10);
3939
expect(firstPipelineSpan?.data?.['gen_ai.usage.output_tokens']).toBe(20); */
4040

4141
// Second AI call - explicitly enabled telemetry
4242
const secondPipelineSpan = aiPipelineSpans[0];
4343
expect(secondPipelineSpan?.data?.['vercel.ai.prompt']).toContain('Where is the second span?');
44-
expect(secondPipelineSpan?.data?.['gen_ai.response.text']).toContain('Second span here!');
44+
expect(secondPipelineSpan?.data?.['gen_ai.output.messages']).toContain('Second span here!');
4545

4646
// Third AI call - with tool calls
4747
/* const thirdPipelineSpan = aiPipelineSpans[2];

dev-packages/e2e-tests/test-applications/nextjs-16/tests/ai-test.test.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -34,14 +34,14 @@ test('should create AI spans with correct attributes', async ({ page }) => {
3434
expect(firstPipelineSpan?.data?.['vercel.ai.model.id']).toBe('mock-model-id');
3535
expect(firstPipelineSpan?.data?.['vercel.ai.model.provider']).toBe('mock-provider');
3636
expect(firstPipelineSpan?.data?.['vercel.ai.prompt']).toContain('Where is the first span?');
37-
expect(firstPipelineSpan?.data?.['gen_ai.response.text']).toBe('First span here!');
37+
expect(firstPipelineSpan?.data?.['gen_ai.output.messages']).toContain('First span here!');
3838
expect(firstPipelineSpan?.data?.['gen_ai.usage.input_tokens']).toBe(10);
3939
expect(firstPipelineSpan?.data?.['gen_ai.usage.output_tokens']).toBe(20); */
4040

4141
// Second AI call - explicitly enabled telemetry
4242
const secondPipelineSpan = aiPipelineSpans[0];
4343
expect(secondPipelineSpan?.data?.['vercel.ai.prompt']).toContain('Where is the second span?');
44-
expect(secondPipelineSpan?.data?.['gen_ai.response.text']).toContain('Second span here!');
44+
expect(secondPipelineSpan?.data?.['gen_ai.output.messages']).toContain('Second span here!');
4545

4646
// Third AI call - with tool calls
4747
/* const thirdPipelineSpan = aiPipelineSpans[2];

dev-packages/node-integration-tests/suites/tracing/vercelai/scenario.mjs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -51,6 +51,7 @@ async function run() {
5151
}),
5252
tools: {
5353
getWeather: {
54+
description: 'Get the current weather for a location',
5455
parameters: z.object({ location: z.string() }),
5556
execute: async args => {
5657
return `Weather in ${args.location}: Sunny, 72°F`;

dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts

Lines changed: 21 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -5,16 +5,16 @@ import {
55
GEN_AI_INPUT_MESSAGES_ATTRIBUTE,
66
GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE,
77
GEN_AI_OPERATION_NAME_ATTRIBUTE,
8+
GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE,
89
GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE,
910
GEN_AI_REQUEST_MODEL_ATTRIBUTE,
1011
GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE,
1112
GEN_AI_RESPONSE_ID_ATTRIBUTE,
1213
GEN_AI_RESPONSE_MODEL_ATTRIBUTE,
13-
GEN_AI_RESPONSE_TEXT_ATTRIBUTE,
14-
GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE,
1514
GEN_AI_SYSTEM_ATTRIBUTE,
1615
GEN_AI_SYSTEM_INSTRUCTIONS_ATTRIBUTE,
1716
GEN_AI_TOOL_CALL_ID_ATTRIBUTE,
17+
GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE,
1818
GEN_AI_TOOL_INPUT_ATTRIBUTE,
1919
GEN_AI_TOOL_NAME_ATTRIBUTE,
2020
GEN_AI_TOOL_OUTPUT_ATTRIBUTE,
@@ -91,9 +91,10 @@ describe('Vercel AI integration', () => {
9191
data: {
9292
[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
9393
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]',
94+
[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
95+
'[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]',
9496
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
9597
[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
96-
[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String),
9798
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
9899
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
99100
[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
@@ -119,11 +120,12 @@ describe('Vercel AI integration', () => {
119120
data: {
120121
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String),
121122
[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
123+
[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
124+
'[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]',
122125
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
123126
[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'],
124127
[GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
125128
[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
126-
[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String),
127129
[GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
128130
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
129131
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
@@ -201,6 +203,7 @@ describe('Vercel AI integration', () => {
201203
status: 'ok',
202204
}),
203205
// Seventh span - tool call execution span
206+
// Note: gen_ai.tool.description is NOT present when sendDefaultPii: false because ai.prompt.tools is not recorded
204207
expect.objectContaining({
205208
data: {
206209
[GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1',
@@ -220,7 +223,7 @@ describe('Vercel AI integration', () => {
220223
};
221224

222225
const EXPECTED_AVAILABLE_TOOLS_JSON =
223-
'[{"type":"function","name":"getWeather","parameters":{"type":"object","properties":{"location":{"type":"string"}},"required":["location"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}}]';
226+
'[{"type":"function","name":"getWeather","description":"Get the current weather for a location","parameters":{"type":"object","properties":{"location":{"type":"string"}},"required":["location"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}}]';
224227

225228
const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE = {
226229
transaction: 'main',
@@ -230,9 +233,10 @@ describe('Vercel AI integration', () => {
230233
data: {
231234
[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
232235
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the first span?"}]',
236+
[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
237+
'[{"role":"assistant","parts":[{"type":"text","content":"First span here!"}],"finish_reason":"stop"}]',
233238
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
234239
[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
235-
[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!',
236240
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
237241
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
238242
[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
@@ -264,11 +268,12 @@ describe('Vercel AI integration', () => {
264268
[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
265269
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]:
266270
'[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]',
271+
[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
272+
'[{"role":"assistant","parts":[{"type":"text","content":"First span here!"}],"finish_reason":"stop"}]',
267273
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
268274
[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'],
269275
[GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
270276
[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
271-
[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'First span here!',
272277
[GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
273278
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
274279
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
@@ -302,9 +307,10 @@ describe('Vercel AI integration', () => {
302307
data: {
303308
[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
304309
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"Where is the second span?"}]',
310+
[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
311+
'[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]',
305312
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
306313
[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
307-
[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String),
308314
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
309315
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
310316
[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 30,
@@ -335,11 +341,12 @@ describe('Vercel AI integration', () => {
335341
data: {
336342
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String),
337343
[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
344+
[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
345+
'[{"role":"assistant","parts":[{"type":"text","content":"Second span here!"}],"finish_reason":"stop"}]',
338346
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
339347
[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['stop'],
340348
[GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
341349
[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
342-
[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: expect.any(String),
343350
[GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
344351
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 10,
345352
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 20,
@@ -373,10 +380,10 @@ describe('Vercel AI integration', () => {
373380
data: {
374381
[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
375382
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: '[{"role":"user","content":"What is the weather in San Francisco?"}]',
383+
[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
384+
'[{"role":"assistant","parts":[{"type":"text","content":"Tool call completed!"},{"type":"tool_call","id":"call-1","name":"getWeather","arguments":"{ \\"location\\": \\"San Francisco\\" }"}],"finish_reason":"tool_call"}]',
376385
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
377386
[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
378-
[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Tool call completed!',
379-
[GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String),
380387
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
381388
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
382389
[GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: 40,
@@ -408,12 +415,12 @@ describe('Vercel AI integration', () => {
408415
[GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE]: EXPECTED_AVAILABLE_TOOLS_JSON,
409416
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.any(String),
410417
[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 1,
418+
[GEN_AI_OUTPUT_MESSAGES_ATTRIBUTE]:
419+
'[{"role":"assistant","parts":[{"type":"text","content":"Tool call completed!"},{"type":"tool_call","id":"call-1","name":"getWeather","arguments":"{ \\"location\\": \\"San Francisco\\" }"}],"finish_reason":"tool_call"}]',
411420
[GEN_AI_REQUEST_MODEL_ATTRIBUTE]: 'mock-model-id',
412421
[GEN_AI_RESPONSE_FINISH_REASONS_ATTRIBUTE]: ['tool-calls'],
413422
[GEN_AI_RESPONSE_ID_ATTRIBUTE]: expect.any(String),
414423
[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]: 'mock-model-id',
415-
[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Tool call completed!',
416-
[GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE]: expect.any(String),
417424
[GEN_AI_SYSTEM_ATTRIBUTE]: 'mock-provider',
418425
[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: 15,
419426
[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: 25,
@@ -447,6 +454,7 @@ describe('Vercel AI integration', () => {
447454
expect.objectContaining({
448455
data: {
449456
[GEN_AI_TOOL_CALL_ID_ATTRIBUTE]: 'call-1',
457+
[GEN_AI_TOOL_DESCRIPTION_ATTRIBUTE]: 'Get the current weather for a location',
450458
[GEN_AI_TOOL_INPUT_ATTRIBUTE]: expect.any(String),
451459
[GEN_AI_TOOL_NAME_ATTRIBUTE]: 'getWeather',
452460
[GEN_AI_TOOL_OUTPUT_ATTRIBUTE]: expect.any(String),
@@ -809,7 +817,6 @@ describe('Vercel AI integration', () => {
809817
data: expect.objectContaining({
810818
[GEN_AI_INPUT_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: 3,
811819
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringMatching(/^\[.*"(?:text|content)":"C+".*\]$/),
812-
[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Response to truncated messages',
813820
}),
814821
}),
815822
// Second call: Last message is small and kept intact
@@ -819,7 +826,6 @@ describe('Vercel AI integration', () => {
819826
[GEN_AI_INPUT_MESSAGES_ATTRIBUTE]: expect.stringContaining(
820827
'This is a small message that fits within the limit',
821828
),
822-
[GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: 'Response to small message',
823829
}),
824830
}),
825831
]),

dev-packages/node-integration-tests/suites/tracing/vercelai/v5/scenario.mjs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -47,6 +47,7 @@ async function run() {
4747
}),
4848
tools: {
4949
getWeather: tool({
50+
description: 'Get the current weather for a location',
5051
inputSchema: z.object({ location: z.string() }),
5152
execute: async ({ location }) => `Weather in ${location}: Sunny, 72°F`,
5253
}),

0 commit comments

Comments (0)