diff --git a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
index 04ff4a0ac52c..afff2b1f8f28 100644
--- a/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
+++ b/dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts
@@ -13,14 +13,7 @@ describe('Vercel AI integration', () => {
       // First span - no telemetry config, should enable telemetry but not record inputs/outputs when sendDefaultPii: false
       expect.objectContaining({
         data: {
-          'vercel.ai.model.id': 'mock-model-id',
-          'vercel.ai.model.provider': 'mock-provider',
-          'vercel.ai.operationId': 'ai.generateText',
-          'vercel.ai.pipeline.name': 'generateText',
-          'vercel.ai.response.finishReason': 'stop',
-          'vercel.ai.settings.maxRetries': 2,
-          'vercel.ai.settings.maxSteps': 1,
-          'vercel.ai.streaming': false,
+          'gen_ai.request.model': 'mock-model-id',
           'gen_ai.response.model': 'mock-model-id',
           'gen_ai.usage.input_tokens': 10,
           'gen_ai.usage.output_tokens': 20,
@@ -28,6 +21,13 @@ describe('Vercel AI integration', () => {
           'operation.name': 'ai.generateText',
           'sentry.op': 'gen_ai.invoke_agent',
           'sentry.origin': 'auto.vercelai.otel',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.operationId': 'ai.generateText',
+          'vercel.ai.pipeline.name': 'generateText',
+          'vercel.ai.response.finishReason': 'stop',
+          'vercel.ai.settings.maxRetries': 2,
+          'vercel.ai.settings.maxSteps': 1,
+          'vercel.ai.streaming': false,
         },
         description: 'generateText',
         op: 'gen_ai.invoke_agent',
@@ -37,27 +37,26 @@ describe('Vercel AI integration', () => {
       // Second span - explicitly enabled telemetry but recordInputs/recordOutputs not set, should not record when sendDefaultPii: false
       expect.objectContaining({
         data: {
-          'sentry.origin': 'auto.vercelai.otel',
-          'sentry.op': 'gen_ai.generate_text',
+          'gen_ai.request.model': 'mock-model-id',
+          'gen_ai.response.finish_reasons': ['stop'],
+          'gen_ai.response.id': expect.any(String),
+          'gen_ai.response.model': 'mock-model-id',
+          'gen_ai.system': 'mock-provider',
+          'gen_ai.usage.input_tokens': 10,
+          'gen_ai.usage.output_tokens': 20,
+          'gen_ai.usage.total_tokens': 30,
           'operation.name': 'ai.generateText.doGenerate',
-          'vercel.ai.operationId': 'ai.generateText.doGenerate',
+          'sentry.op': 'gen_ai.generate_text',
+          'sentry.origin': 'auto.vercelai.otel',
           'vercel.ai.model.provider': 'mock-provider',
-          'vercel.ai.model.id': 'mock-model-id',
-          'vercel.ai.settings.maxRetries': 2,
-          'gen_ai.system': 'mock-provider',
-          'gen_ai.request.model': 'mock-model-id',
+          'vercel.ai.operationId': 'ai.generateText.doGenerate',
           'vercel.ai.pipeline.name': 'generateText.doGenerate',
-          'vercel.ai.streaming': false,
           'vercel.ai.response.finishReason': 'stop',
-          'vercel.ai.response.model': 'mock-model-id',
           'vercel.ai.response.id': expect.any(String),
+          'vercel.ai.response.model': 'mock-model-id',
           'vercel.ai.response.timestamp': expect.any(String),
-          'gen_ai.response.finish_reasons': ['stop'],
-          'gen_ai.usage.input_tokens': 10,
-          'gen_ai.usage.output_tokens': 20,
-          'gen_ai.response.id': expect.any(String),
-          'gen_ai.response.model': 'mock-model-id',
-          'gen_ai.usage.total_tokens': 30,
+          'vercel.ai.settings.maxRetries': 2,
+          'vercel.ai.streaming': false,
         },
         description: 'generate_text mock-model-id',
         op: 'gen_ai.generate_text',
@@ -67,24 +66,25 @@ describe('Vercel AI integration', () => {
       // Third span - explicit telemetry enabled, should record inputs/outputs regardless of sendDefaultPii
       expect.objectContaining({
         data: {
-          'vercel.ai.model.id': 'mock-model-id',
-          'vercel.ai.model.provider': 'mock-provider',
-          'vercel.ai.operationId': 'ai.generateText',
-          'vercel.ai.pipeline.name': 'generateText',
-          'vercel.ai.prompt': '{"prompt":"Where is the second span?"}',
-          'vercel.ai.response.finishReason': 'stop',
-          'gen_ai.response.text': expect.any(String),
-          'vercel.ai.settings.maxRetries': 2,
-          'vercel.ai.settings.maxSteps': 1,
-          'vercel.ai.streaming': false,
           'gen_ai.prompt': '{"prompt":"Where is the second span?"}',
+          'gen_ai.request.messages': '[{"role":"user","content":"Where is the second span?"}]',
+          'gen_ai.request.model': 'mock-model-id',
           'gen_ai.response.model': 'mock-model-id',
+          'gen_ai.response.text': expect.any(String),
           'gen_ai.usage.input_tokens': 10,
           'gen_ai.usage.output_tokens': 20,
           'gen_ai.usage.total_tokens': 30,
           'operation.name': 'ai.generateText',
           'sentry.op': 'gen_ai.invoke_agent',
           'sentry.origin': 'auto.vercelai.otel',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.operationId': 'ai.generateText',
+          'vercel.ai.pipeline.name': 'generateText',
+          'vercel.ai.prompt': '{"prompt":"Where is the second span?"}',
+          'vercel.ai.response.finishReason': 'stop',
+          'vercel.ai.settings.maxRetries': 2,
+          'vercel.ai.settings.maxSteps': 1,
+          'vercel.ai.streaming': false,
         },
         description: 'generateText',
         op: 'gen_ai.invoke_agent',
@@ -94,30 +94,29 @@ describe('Vercel AI integration', () => {
       // Fourth span - doGenerate for explicit telemetry enabled call
       expect.objectContaining({
         data: {
-          'sentry.origin': 'auto.vercelai.otel',
-          'sentry.op': 'gen_ai.generate_text',
+          'gen_ai.request.messages': expect.any(String),
+          'gen_ai.request.model': 'mock-model-id',
+          'gen_ai.response.finish_reasons': ['stop'],
+          'gen_ai.response.id': expect.any(String),
+          'gen_ai.response.model': 'mock-model-id',
+          'gen_ai.response.text': expect.any(String),
+          'gen_ai.system': 'mock-provider',
+          'gen_ai.usage.input_tokens': 10,
+          'gen_ai.usage.output_tokens': 20,
+          'gen_ai.usage.total_tokens': 30,
           'operation.name': 'ai.generateText.doGenerate',
-          'vercel.ai.operationId': 'ai.generateText.doGenerate',
+          'sentry.op': 'gen_ai.generate_text',
+          'sentry.origin': 'auto.vercelai.otel',
           'vercel.ai.model.provider': 'mock-provider',
-          'vercel.ai.model.id': 'mock-model-id',
-          'vercel.ai.settings.maxRetries': 2,
-          'gen_ai.system': 'mock-provider',
-          'gen_ai.request.model': 'mock-model-id',
+          'vercel.ai.operationId': 'ai.generateText.doGenerate',
           'vercel.ai.pipeline.name': 'generateText.doGenerate',
-          'vercel.ai.streaming': false,
+          'vercel.ai.prompt.format': expect.any(String),
           'vercel.ai.response.finishReason': 'stop',
-          'vercel.ai.response.model': 'mock-model-id',
           'vercel.ai.response.id': expect.any(String),
-          'gen_ai.response.text': expect.any(String),
+          'vercel.ai.response.model': 'mock-model-id',
           'vercel.ai.response.timestamp': expect.any(String),
-          'vercel.ai.prompt.format': expect.any(String),
-          'gen_ai.request.messages': expect.any(String),
-          'gen_ai.response.finish_reasons': ['stop'],
-          'gen_ai.usage.input_tokens': 10,
-          'gen_ai.usage.output_tokens': 20,
-          'gen_ai.response.id': expect.any(String),
-          'gen_ai.response.model': 'mock-model-id',
-          'gen_ai.usage.total_tokens': 30,
+          'vercel.ai.settings.maxRetries': 2,
+          'vercel.ai.streaming': false,
         },
         description: 'generate_text mock-model-id',
         op: 'gen_ai.generate_text',
@@ -127,14 +126,7 @@ describe('Vercel AI integration', () => {
       // Fifth span - tool call generateText span
       expect.objectContaining({
         data: {
-          'vercel.ai.model.id': 'mock-model-id',
-          'vercel.ai.model.provider': 'mock-provider',
-          'vercel.ai.operationId': 'ai.generateText',
-          'vercel.ai.pipeline.name': 'generateText',
-          'vercel.ai.response.finishReason': 'tool-calls',
-          'vercel.ai.settings.maxRetries': 2,
-          'vercel.ai.settings.maxSteps': 1,
-          'vercel.ai.streaming': false,
+          'gen_ai.request.model': 'mock-model-id',
           'gen_ai.response.model': 'mock-model-id',
           'gen_ai.usage.input_tokens': 15,
           'gen_ai.usage.output_tokens': 25,
@@ -142,6 +134,13 @@ describe('Vercel AI integration', () => {
           'operation.name': 'ai.generateText',
           'sentry.op': 'gen_ai.invoke_agent',
           'sentry.origin': 'auto.vercelai.otel',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.operationId': 'ai.generateText',
+          'vercel.ai.pipeline.name': 'generateText',
+          'vercel.ai.response.finishReason': 'tool-calls',
+          'vercel.ai.settings.maxRetries': 2,
+          'vercel.ai.settings.maxSteps': 1,
+          'vercel.ai.streaming': false,
         },
         description: 'generateText',
         op: 'gen_ai.invoke_agent',
@@ -151,16 +150,6 @@ describe('Vercel AI integration', () => {
       // Sixth span - tool call doGenerate span
       expect.objectContaining({
         data: {
-          'vercel.ai.model.id': 'mock-model-id',
-          'vercel.ai.model.provider': 'mock-provider',
-          'vercel.ai.operationId': 'ai.generateText.doGenerate',
-          'vercel.ai.pipeline.name': 'generateText.doGenerate',
-          'vercel.ai.response.finishReason': 'tool-calls',
-          'vercel.ai.response.id': expect.any(String),
-          'vercel.ai.response.model': 'mock-model-id',
-          'vercel.ai.response.timestamp': expect.any(String),
-          'vercel.ai.settings.maxRetries': 2,
-          'vercel.ai.streaming': false,
           'gen_ai.request.model': 'mock-model-id',
           'gen_ai.response.finish_reasons': ['tool-calls'],
           'gen_ai.response.id': expect.any(String),
@@ -172,6 +161,15 @@ describe('Vercel AI integration', () => {
           'operation.name': 'ai.generateText.doGenerate',
           'sentry.op': 'gen_ai.generate_text',
           'sentry.origin': 'auto.vercelai.otel',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.operationId': 'ai.generateText.doGenerate',
+          'vercel.ai.pipeline.name': 'generateText.doGenerate',
+          'vercel.ai.response.finishReason': 'tool-calls',
+          'vercel.ai.response.id': expect.any(String),
+          'vercel.ai.response.model': 'mock-model-id',
+          'vercel.ai.response.timestamp': expect.any(String),
+          'vercel.ai.settings.maxRetries': 2,
+          'vercel.ai.streaming': false,
         },
         description: 'generate_text mock-model-id',
         op: 'gen_ai.generate_text',
@@ -181,13 +179,13 @@ describe('Vercel AI integration', () => {
       // Seventh span - tool call execution span
       expect.objectContaining({
         data: {
-          'vercel.ai.operationId': 'ai.toolCall',
           'gen_ai.tool.call.id': 'call-1',
           'gen_ai.tool.name': 'getWeather',
           'gen_ai.tool.type': 'function',
           'operation.name': 'ai.toolCall',
           'sentry.op': 'gen_ai.execute_tool',
           'sentry.origin': 'auto.vercelai.otel',
+          'vercel.ai.operationId': 'ai.toolCall',
         },
         description: 'execute_tool getWeather',
         op: 'gen_ai.execute_tool',
@@ -206,50 +204,45 @@ describe('Vercel AI integration', () => {
       // First span - no telemetry config, should enable telemetry AND record inputs/outputs when sendDefaultPii: true
       expect.objectContaining({
         data: {
-          'vercel.ai.model.id': 'mock-model-id',
-          'vercel.ai.model.provider': 'mock-provider',
-          'vercel.ai.operationId': 'ai.generateText',
-          'vercel.ai.pipeline.name': 'generateText',
-          'vercel.ai.prompt': '{"prompt":"Where is the first span?"}',
-          'vercel.ai.response.finishReason': 'stop',
-          'gen_ai.response.text': 'First span here!',
-          'vercel.ai.settings.maxRetries': 2,
-          'vercel.ai.settings.maxSteps': 1,
-          'vercel.ai.streaming': false,
           'gen_ai.prompt': '{"prompt":"Where is the first span?"}',
+          'gen_ai.request.messages': '[{"role":"user","content":"Where is the first span?"}]',
+          'gen_ai.request.model': 'mock-model-id',
           'gen_ai.response.model': 'mock-model-id',
+          'gen_ai.response.text': 'First span here!',
           'gen_ai.usage.input_tokens': 10,
           'gen_ai.usage.output_tokens': 20,
           'gen_ai.usage.total_tokens': 30,
           'operation.name': 'ai.generateText',
           'sentry.op': 'gen_ai.invoke_agent',
           'sentry.origin': 'auto.vercelai.otel',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.operationId': 'ai.generateText',
+          'vercel.ai.pipeline.name': 'generateText',
+          'vercel.ai.prompt': '{"prompt":"Where is the first span?"}',
+          'vercel.ai.response.finishReason': 'stop',
+          'vercel.ai.settings.maxRetries': 2,
+          'vercel.ai.settings.maxSteps': 1,
+          'vercel.ai.streaming': false,
         },
         description: 'generateText',
         op: 'gen_ai.invoke_agent',
         origin: 'auto.vercelai.otel',
         status: 'ok',
+        parent_span_id: expect.any(String),
+        span_id: expect.any(String),
+        start_timestamp: expect.any(Number),
+        timestamp: expect.any(Number),
+        trace_id: expect.any(String),
       }),
       // Second span - doGenerate for first call, should also include input/output fields when sendDefaultPii: true
       expect.objectContaining({
         data: {
-          'vercel.ai.model.id': 'mock-model-id',
-          'vercel.ai.model.provider': 'mock-provider',
-          'vercel.ai.operationId': 'ai.generateText.doGenerate',
-          'vercel.ai.pipeline.name': 'generateText.doGenerate',
-          'vercel.ai.prompt.format': 'prompt',
           'gen_ai.request.messages': '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]',
-          'vercel.ai.response.finishReason': 'stop',
-          'vercel.ai.response.id': expect.any(String),
-          'vercel.ai.response.model': 'mock-model-id',
-          'gen_ai.response.text': 'First span here!',
-          'vercel.ai.response.timestamp': expect.any(String),
-          'vercel.ai.settings.maxRetries': 2,
-          'vercel.ai.streaming': false,
           'gen_ai.request.model': 'mock-model-id',
           'gen_ai.response.finish_reasons': ['stop'],
           'gen_ai.response.id': expect.any(String),
           'gen_ai.response.model': 'mock-model-id',
+          'gen_ai.response.text': 'First span here!',
           'gen_ai.system': 'mock-provider',
           'gen_ai.usage.input_tokens': 10,
           'gen_ai.usage.output_tokens': 20,
@@ -257,123 +250,142 @@ describe('Vercel AI integration', () => {
           'operation.name': 'ai.generateText.doGenerate',
           'sentry.op': 'gen_ai.generate_text',
           'sentry.origin': 'auto.vercelai.otel',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.operationId': 'ai.generateText.doGenerate',
+          'vercel.ai.pipeline.name': 'generateText.doGenerate',
+          'vercel.ai.prompt.format': 'prompt',
+          'vercel.ai.response.finishReason': 'stop',
+          'vercel.ai.response.id': expect.any(String),
+          'vercel.ai.response.model': 'mock-model-id',
+          'vercel.ai.response.timestamp': expect.any(String),
+          'vercel.ai.settings.maxRetries': 2,
+          'vercel.ai.streaming': false,
         },
         description: 'generate_text mock-model-id',
         op: 'gen_ai.generate_text',
         origin: 'auto.vercelai.otel',
         status: 'ok',
+        parent_span_id: expect.any(String),
+        span_id: expect.any(String),
+        start_timestamp: expect.any(Number),
+        timestamp: expect.any(Number),
+        trace_id: expect.any(String),
      }),
       // Third span - explicitly enabled telemetry, should record inputs/outputs regardless of sendDefaultPii
       expect.objectContaining({
         data: {
-          'vercel.ai.model.id': 'mock-model-id',
-          'vercel.ai.model.provider': 'mock-provider',
-          'vercel.ai.operationId': 'ai.generateText',
-          'vercel.ai.pipeline.name': 'generateText',
-          'vercel.ai.prompt': '{"prompt":"Where is the second span?"}',
-          'vercel.ai.response.finishReason': 'stop',
-          'gen_ai.response.text': expect.any(String),
-          'vercel.ai.settings.maxRetries': 2,
-          'vercel.ai.settings.maxSteps': 1,
-          'vercel.ai.streaming': false,
           'gen_ai.prompt': '{"prompt":"Where is the second span?"}',
+          'gen_ai.request.messages': '[{"role":"user","content":"Where is the second span?"}]',
+          'gen_ai.request.model': 'mock-model-id',
           'gen_ai.response.model': 'mock-model-id',
+          'gen_ai.response.text': expect.any(String),
           'gen_ai.usage.input_tokens': 10,
           'gen_ai.usage.output_tokens': 20,
           'gen_ai.usage.total_tokens': 30,
           'operation.name': 'ai.generateText',
           'sentry.op': 'gen_ai.invoke_agent',
           'sentry.origin': 'auto.vercelai.otel',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.operationId': 'ai.generateText',
+          'vercel.ai.pipeline.name': 'generateText',
+          'vercel.ai.prompt': '{"prompt":"Where is the second span?"}',
+          'vercel.ai.response.finishReason': 'stop',
+          'vercel.ai.settings.maxRetries': 2,
+          'vercel.ai.settings.maxSteps': 1,
+          'vercel.ai.streaming': false,
         },
         description: 'generateText',
         op: 'gen_ai.invoke_agent',
         origin: 'auto.vercelai.otel',
         status: 'ok',
+        parent_span_id: expect.any(String),
+        span_id: expect.any(String),
+        start_timestamp: expect.any(Number),
+        timestamp: expect.any(Number),
+        trace_id: expect.any(String),
       }),
       // Fourth span - doGenerate for explicitly enabled telemetry call
       expect.objectContaining({
         data: {
-          'sentry.origin': 'auto.vercelai.otel',
-          'sentry.op': 'gen_ai.generate_text',
+          'gen_ai.request.messages': expect.any(String),
+          'gen_ai.request.model': 'mock-model-id',
+          'gen_ai.response.finish_reasons': ['stop'],
+          'gen_ai.response.id': expect.any(String),
+          'gen_ai.response.model': 'mock-model-id',
+          'gen_ai.response.text': expect.any(String),
+          'gen_ai.system': 'mock-provider',
+          'gen_ai.usage.input_tokens': 10,
+          'gen_ai.usage.output_tokens': 20,
+          'gen_ai.usage.total_tokens': 30,
           'operation.name': 'ai.generateText.doGenerate',
-          'vercel.ai.operationId': 'ai.generateText.doGenerate',
+          'sentry.op': 'gen_ai.generate_text',
+          'sentry.origin': 'auto.vercelai.otel',
           'vercel.ai.model.provider': 'mock-provider',
-          'vercel.ai.model.id': 'mock-model-id',
-          'vercel.ai.settings.maxRetries': 2,
-          'gen_ai.system': 'mock-provider',
-          'gen_ai.request.model': 'mock-model-id',
+          'vercel.ai.operationId': 'ai.generateText.doGenerate',
           'vercel.ai.pipeline.name': 'generateText.doGenerate',
-          'vercel.ai.streaming': false,
+          'vercel.ai.prompt.format': expect.any(String),
           'vercel.ai.response.finishReason': 'stop',
-          'vercel.ai.response.model': 'mock-model-id',
           'vercel.ai.response.id': expect.any(String),
-          'gen_ai.response.text': expect.any(String),
+          'vercel.ai.response.model': 'mock-model-id',
           'vercel.ai.response.timestamp': expect.any(String),
-          'vercel.ai.prompt.format': expect.any(String),
-          'gen_ai.request.messages': expect.any(String),
-          'gen_ai.response.finish_reasons': ['stop'],
-          'gen_ai.usage.input_tokens': 10,
-          'gen_ai.usage.output_tokens': 20,
-          'gen_ai.response.id': expect.any(String),
-          'gen_ai.response.model': 'mock-model-id',
-          'gen_ai.usage.total_tokens': 30,
+          'vercel.ai.settings.maxRetries': 2,
+          'vercel.ai.streaming': false,
         },
         description: 'generate_text mock-model-id',
         op: 'gen_ai.generate_text',
         origin: 'auto.vercelai.otel',
         status: 'ok',
+        parent_span_id: expect.any(String),
+        span_id: expect.any(String),
+        start_timestamp: expect.any(Number),
+        timestamp: expect.any(Number),
+        trace_id: expect.any(String),
       }),
       // Fifth span - tool call generateText span (should include prompts when sendDefaultPii: true)
       expect.objectContaining({
         data: {
-          'vercel.ai.model.id': 'mock-model-id',
-          'vercel.ai.model.provider': 'mock-provider',
-          'vercel.ai.operationId': 'ai.generateText',
-          'vercel.ai.pipeline.name': 'generateText',
-          'vercel.ai.prompt': '{"prompt":"What is the weather in San Francisco?"}',
-          'vercel.ai.response.finishReason': 'tool-calls',
-          'gen_ai.response.text': 'Tool call completed!',
-          'gen_ai.response.tool_calls': expect.any(String),
-          'vercel.ai.settings.maxRetries': 2,
-          'vercel.ai.settings.maxSteps': 1,
-          'vercel.ai.streaming': false,
           'gen_ai.prompt': '{"prompt":"What is the weather in San Francisco?"}',
+          'gen_ai.request.messages': '[{"role":"user","content":"What is the weather in San Francisco?"}]',
+          'gen_ai.request.model': 'mock-model-id',
           'gen_ai.response.model': 'mock-model-id',
+          'gen_ai.response.text': 'Tool call completed!',
+          'gen_ai.response.tool_calls': expect.any(String),
           'gen_ai.usage.input_tokens': 15,
           'gen_ai.usage.output_tokens': 25,
           'gen_ai.usage.total_tokens': 40,
           'operation.name': 'ai.generateText',
           'sentry.op': 'gen_ai.invoke_agent',
           'sentry.origin': 'auto.vercelai.otel',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.operationId': 'ai.generateText',
+          'vercel.ai.pipeline.name': 'generateText',
+          'vercel.ai.prompt': '{"prompt":"What is the weather in San Francisco?"}',
+          'vercel.ai.response.finishReason': 'tool-calls',
+          'vercel.ai.settings.maxRetries': 2,
+          'vercel.ai.settings.maxSteps': 1,
+          'vercel.ai.streaming': false,
         },
         description: 'generateText',
         op: 'gen_ai.invoke_agent',
         origin: 'auto.vercelai.otel',
         status: 'ok',
+        parent_span_id: expect.any(String),
+        span_id: expect.any(String),
+        start_timestamp: expect.any(Number),
+        timestamp: expect.any(Number),
+        trace_id: expect.any(String),
      }),
       // Sixth span - tool call doGenerate span (should include prompts when sendDefaultPii: true)
       expect.objectContaining({
         data: {
-          'vercel.ai.model.id': 'mock-model-id',
-          'vercel.ai.model.provider': 'mock-provider',
-          'vercel.ai.operationId': 'ai.generateText.doGenerate',
-          'vercel.ai.pipeline.name': 'generateText.doGenerate',
-          'vercel.ai.prompt.format': expect.any(String),
-          'gen_ai.request.messages': expect.any(String),
-          'vercel.ai.prompt.toolChoice': expect.any(String),
           'gen_ai.request.available_tools': EXPECTED_AVAILABLE_TOOLS_JSON,
-          'vercel.ai.response.finishReason': 'tool-calls',
-          'vercel.ai.response.id': expect.any(String),
-          'vercel.ai.response.model': 'mock-model-id',
-          'gen_ai.response.text': 'Tool call completed!',
-          'vercel.ai.response.timestamp': expect.any(String),
-          'gen_ai.response.tool_calls': expect.any(String),
-          'vercel.ai.settings.maxRetries': 2,
-          'vercel.ai.streaming': false,
+          'gen_ai.request.messages': expect.any(String),
           'gen_ai.request.model': 'mock-model-id',
           'gen_ai.response.finish_reasons': ['tool-calls'],
           'gen_ai.response.id': expect.any(String),
           'gen_ai.response.model': 'mock-model-id',
+          'gen_ai.response.text': 'Tool call completed!',
+          'gen_ai.response.tool_calls': expect.any(String),
           'gen_ai.system': 'mock-provider',
           'gen_ai.usage.input_tokens': 15,
           'gen_ai.usage.output_tokens': 25,
@@ -381,29 +393,50 @@ describe('Vercel AI integration', () => {
           'operation.name': 'ai.generateText.doGenerate',
           'sentry.op': 'gen_ai.generate_text',
           'sentry.origin': 'auto.vercelai.otel',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.operationId': 'ai.generateText.doGenerate',
+          'vercel.ai.pipeline.name': 'generateText.doGenerate',
+          'vercel.ai.prompt.format': expect.any(String),
+          'vercel.ai.prompt.toolChoice': expect.any(String),
+          'vercel.ai.response.finishReason': 'tool-calls',
+          'vercel.ai.response.id': expect.any(String),
+          'vercel.ai.response.model': 'mock-model-id',
+          'vercel.ai.response.timestamp': expect.any(String),
+          'vercel.ai.settings.maxRetries': 2,
+          'vercel.ai.streaming': false,
         },
         description: 'generate_text mock-model-id',
         op: 'gen_ai.generate_text',
         origin: 'auto.vercelai.otel',
         status: 'ok',
+        parent_span_id: expect.any(String),
+        span_id: expect.any(String),
+        start_timestamp: expect.any(Number),
+        timestamp: expect.any(Number),
+        trace_id: expect.any(String),
       }),
       // Seventh span - tool call execution span
       expect.objectContaining({
         data: {
-          'vercel.ai.operationId': 'ai.toolCall',
           'gen_ai.tool.call.id': 'call-1',
-          'gen_ai.tool.name': 'getWeather',
           'gen_ai.tool.input': expect.any(String),
+          'gen_ai.tool.name': 'getWeather',
           'gen_ai.tool.output': expect.any(String),
           'gen_ai.tool.type': 'function',
           'operation.name': 'ai.toolCall',
           'sentry.op': 'gen_ai.execute_tool',
           'sentry.origin': 'auto.vercelai.otel',
+          'vercel.ai.operationId': 'ai.toolCall',
         },
         description: 'execute_tool getWeather',
         op: 'gen_ai.execute_tool',
         origin: 'auto.vercelai.otel',
         status: 'ok',
+        parent_span_id: expect.any(String),
+        span_id: expect.any(String),
+        start_timestamp: expect.any(Number),
+        timestamp: expect.any(Number),
+        trace_id: expect.any(String),
       }),
     ]),
   };
@@ -427,13 +460,7 @@ describe('Vercel AI integration', () => {
     spans: expect.arrayContaining([
       expect.objectContaining({
         data: {
-          'vercel.ai.model.id': 'mock-model-id',
-          'vercel.ai.model.provider': 'mock-provider',
-          'vercel.ai.operationId': 'ai.generateText',
-          'vercel.ai.pipeline.name': 'generateText',
-          'vercel.ai.settings.maxRetries': 2,
-          'vercel.ai.settings.maxSteps': 1,
-          'vercel.ai.streaming': false,
+          'gen_ai.request.model': 'mock-model-id',
           'gen_ai.response.model': 'mock-model-id',
           'gen_ai.usage.input_tokens': 15,
           'gen_ai.usage.output_tokens': 25,
@@ -441,6 +468,12 @@ describe('Vercel AI integration', () => {
           'operation.name': 'ai.generateText',
           'sentry.op': 'gen_ai.invoke_agent',
           'sentry.origin': 'auto.vercelai.otel',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.operationId': 'ai.generateText',
+          'vercel.ai.pipeline.name': 'generateText',
+          'vercel.ai.settings.maxRetries': 2,
+          'vercel.ai.settings.maxSteps': 1,
+          'vercel.ai.streaming': false,
         },
         description: 'generateText',
         op: 'gen_ai.invoke_agent',
@@ -449,16 +482,6 @@ describe('Vercel AI integration', () => {
       }),
       expect.objectContaining({
         data: {
-          'vercel.ai.model.id': 'mock-model-id',
-          'vercel.ai.model.provider': 'mock-provider',
-          'vercel.ai.operationId': 'ai.generateText.doGenerate',
-          'vercel.ai.pipeline.name': 'generateText.doGenerate',
-          'vercel.ai.response.finishReason': 'tool-calls',
-          'vercel.ai.response.id': expect.any(String),
-          'vercel.ai.response.model': 'mock-model-id',
-          'vercel.ai.response.timestamp': expect.any(String),
-          'vercel.ai.settings.maxRetries': 2,
-          'vercel.ai.streaming': false,
           'gen_ai.request.model': 'mock-model-id',
           'gen_ai.response.finish_reasons': ['tool-calls'],
           'gen_ai.response.id': expect.any(String),
@@ -470,6 +493,15 @@ describe('Vercel AI integration', () => {
           'operation.name': 'ai.generateText.doGenerate',
           'sentry.op': 'gen_ai.generate_text',
           'sentry.origin': 'auto.vercelai.otel',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.operationId': 'ai.generateText.doGenerate',
+          'vercel.ai.pipeline.name': 'generateText.doGenerate',
+          'vercel.ai.response.finishReason': 'tool-calls',
+          'vercel.ai.response.id': expect.any(String),
+          'vercel.ai.response.model': 'mock-model-id',
+          'vercel.ai.response.timestamp': expect.any(String),
+          'vercel.ai.settings.maxRetries': 2,
+          'vercel.ai.streaming': false,
         },
         description: 'generate_text mock-model-id',
         op: 'gen_ai.generate_text',
@@ -478,13 +510,13 @@ describe('Vercel AI integration', () => {
       }),
       expect.objectContaining({
         data: {
-          'vercel.ai.operationId': 'ai.toolCall',
           'gen_ai.tool.call.id': 'call-1',
           'gen_ai.tool.name': 'getWeather',
           'gen_ai.tool.type': 'function',
           'operation.name': 'ai.toolCall',
           'sentry.op': 'gen_ai.execute_tool',
           'sentry.origin': 'auto.vercelai.otel',
+          'vercel.ai.operationId': 'ai.toolCall',
         },
         description: 'execute_tool getWeather',
         op: 'gen_ai.execute_tool',
@@ -548,13 +580,7 @@ describe('Vercel AI integration', () => {
     spans: expect.arrayContaining([
      expect.objectContaining({
        data: {
-          'vercel.ai.model.id': 'mock-model-id',
-          'vercel.ai.model.provider': 'mock-provider',
-          'vercel.ai.operationId': 'ai.generateText',
-          'vercel.ai.pipeline.name': 'generateText',
-          'vercel.ai.settings.maxRetries': 2,
-          'vercel.ai.settings.maxSteps': 1,
-          'vercel.ai.streaming': false,
+          'gen_ai.request.model': 'mock-model-id',
           'gen_ai.response.model': 'mock-model-id',
           'gen_ai.usage.input_tokens': 15,
           'gen_ai.usage.output_tokens': 25,
@@ -562,6 +588,12 @@ describe('Vercel AI integration', () => {
           'operation.name': 'ai.generateText',
           'sentry.op': 'gen_ai.invoke_agent',
           'sentry.origin': 'auto.vercelai.otel',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.operationId': 'ai.generateText',
+          'vercel.ai.pipeline.name': 'generateText',
+          'vercel.ai.settings.maxRetries': 2,
+          'vercel.ai.settings.maxSteps': 1,
+          'vercel.ai.streaming': false,
         },
         description: 'generateText',
         op: 'gen_ai.invoke_agent',
@@ -570,16 +602,6 @@ describe('Vercel AI integration', () => {
       }),
       expect.objectContaining({
         data: {
-          'vercel.ai.model.id': 'mock-model-id',
-          'vercel.ai.model.provider': 'mock-provider',
-          'vercel.ai.operationId': 'ai.generateText.doGenerate',
-          'vercel.ai.pipeline.name': 'generateText.doGenerate',
-          'vercel.ai.response.finishReason': 'tool-calls',
-          'vercel.ai.response.id': expect.any(String),
-          'vercel.ai.response.model': 'mock-model-id',
-          'vercel.ai.response.timestamp': expect.any(String),
-          'vercel.ai.settings.maxRetries': 2,
-          'vercel.ai.streaming': false,
           'gen_ai.request.model': 'mock-model-id',
           'gen_ai.response.finish_reasons': ['tool-calls'],
           'gen_ai.response.id': expect.any(String),
@@ -591,6 +613,15 @@ describe('Vercel AI integration', () => {
           'operation.name': 'ai.generateText.doGenerate',
           'sentry.op': 'gen_ai.generate_text',
           'sentry.origin': 'auto.vercelai.otel',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.operationId': 'ai.generateText.doGenerate',
+          'vercel.ai.pipeline.name': 'generateText.doGenerate',
+          'vercel.ai.response.finishReason': 'tool-calls',
+          'vercel.ai.response.id': expect.any(String),
+          'vercel.ai.response.model': 'mock-model-id',
+          'vercel.ai.response.timestamp': expect.any(String),
+          'vercel.ai.settings.maxRetries': 2,
+          'vercel.ai.streaming': false,
         },
         description: 'generate_text mock-model-id',
         op: 'gen_ai.generate_text',
@@ -599,13 +630,13 @@ describe('Vercel AI integration', () => {
       }),
       expect.objectContaining({
         data: {
-          'vercel.ai.operationId': 'ai.toolCall',
           'gen_ai.tool.call.id': 'call-1',
           'gen_ai.tool.name': 'getWeather',
           'gen_ai.tool.type': 'function',
           'operation.name': 'ai.toolCall',
           'sentry.op': 'gen_ai.execute_tool',
           'sentry.origin': 'auto.vercelai.otel',
+          'vercel.ai.operationId': 'ai.toolCall',
         },
         description: 'execute_tool getWeather',
         op: 'gen_ai.execute_tool',
@@ -642,7 +673,7 @@ describe('Vercel AI integration', () => {
     let transactionEvent: Event | undefined;
     let errorEvent: Event | undefined;
 
-    const runner = await createRunner()
+    const runner = createRunner()
       .expect({
         transaction: transaction => {
           transactionEvent = transaction;
diff --git a/packages/core/src/tracing/vercel-ai/index.ts b/packages/core/src/tracing/vercel-ai/index.ts
index f07244088ff9..8ee973197847 100644
--- a/packages/core/src/tracing/vercel-ai/index.ts
+++ b/packages/core/src/tracing/vercel-ai/index.ts
@@ -10,7 +10,12 @@ import {
 import { getTruncatedJsonString } from '../ai/utils';
 import { toolCallSpanMap } from './constants';
 import type { TokenSummary } from './types';
-import { accumulateTokensForParent, applyAccumulatedTokens, convertAvailableToolsToJsonString } from './utils';
+import {
+  accumulateTokensForParent,
+  applyAccumulatedTokens,
+  convertAvailableToolsToJsonString,
+  requestMessagesFromPrompt,
+} from './utils';
 import type { ProviderMetadata } from './vercel-ai-attributes';
 import {
   AI_MODEL_ID_ATTRIBUTE,
@@ -141,6 +146,7 @@ function processEndedVercelAiSpan(span: SpanJSON): void {
   renameAttributeKey(attributes, AI_TOOL_CALL_RESULT_ATTRIBUTE, 'gen_ai.tool.output');
 
   renameAttributeKey(attributes, AI_SCHEMA_ATTRIBUTE, 'gen_ai.request.schema');
+  renameAttributeKey(attributes, AI_MODEL_ID_ATTRIBUTE, 'gen_ai.request.model');
 
   addProviderMetadataToAttributes(attributes);
 
@@ -206,6 +212,10 @@ function processGenerateSpan(span: Span, name: string, attributes: SpanAttribute
   if (attributes[AI_PROMPT_ATTRIBUTE]) {
     const truncatedPrompt = getTruncatedJsonString(attributes[AI_PROMPT_ATTRIBUTE] as string | string[]);
     span.setAttribute('gen_ai.prompt', truncatedPrompt);
+
+    if (!attributes['gen_ai.request.messages'] && !attributes[AI_PROMPT_MESSAGES_ATTRIBUTE]) {
+      requestMessagesFromPrompt(span, attributes[AI_PROMPT_ATTRIBUTE]);
+    }
   }
   if (attributes[AI_MODEL_ID_ATTRIBUTE] && !attributes[GEN_AI_RESPONSE_MODEL_ATTRIBUTE]) {
     span.setAttribute(GEN_AI_RESPONSE_MODEL_ATTRIBUTE, attributes[AI_MODEL_ID_ATTRIBUTE]);
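
Note (outside the patch): the new renameAttributeKey(attributes, AI_MODEL_ID_ATTRIBUTE, 'gen_ai.request.model') call is what makes the 'vercel.ai.model.id' key disappear from the test expectations above, with 'gen_ai.request.model' appearing in its place. The sketch below shows the intended effect; renameAttributeKey's implementation is not part of this patch, so the helper here is an assumed stand-in that moves a value to a new key, and the literal 'ai.model.id' is only an assumption for what AI_MODEL_ID_ATTRIBUTE resolves to.

    type Attributes = Record<string, unknown>;

    // Assumed stand-in for the integration's renameAttributeKey helper.
    function renameAttributeKey(attributes: Attributes, oldKey: string, newKey: string): void {
      if (attributes[oldKey] != null) {
        attributes[newKey] = attributes[oldKey];
        delete attributes[oldKey];
      }
    }

    const attributes: Attributes = { 'ai.model.id': 'mock-model-id' };
    renameAttributeKey(attributes, 'ai.model.id', 'gen_ai.request.model');
    // attributes is now { 'gen_ai.request.model': 'mock-model-id' }, matching
    // the updated span expectations in test.ts above.

Note also the gating in processGenerateSpan: 'gen_ai.request.messages' is only derived from the prompt when neither 'gen_ai.request.messages' nor the SDK's own prompt-messages attribute is already set, so an explicit messages array always wins over the derived one.
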
diff --git a/packages/core/src/tracing/vercel-ai/utils.ts b/packages/core/src/tracing/vercel-ai/utils.ts
index 9a0b57eb16f7..6be627264686 100644
--- a/packages/core/src/tracing/vercel-ai/utils.ts
+++ b/packages/core/src/tracing/vercel-ai/utils.ts
@@ -1,6 +1,7 @@
 import type { TraceContext } from '../../types-hoist/context';
 import type { Span, SpanJSON } from '../../types-hoist/span';
 import { GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE } from '../ai/gen-ai-attributes';
+import { getTruncatedJsonString } from '../ai/utils';
 import { toolCallSpanMap } from './constants';
 import type { TokenSummary } from './types';
 
@@ -87,3 +88,39 @@ export function convertAvailableToolsToJsonString(tools: unknown[]): string {
   });
   return JSON.stringify(toolObjects);
 }
+
+/**
+ * Convert a serialized prompt string into a messages array.
+ */
+export function convertPromptToMessages(prompt: string): { role: string; content: string }[] | undefined {
+  try {
+    const p = JSON.parse(prompt);
+    if (p !== null && typeof p === 'object') {
+      const { prompt, system } = p;
+      if (typeof prompt === 'string' || typeof system === 'string') {
+        const messages: { role: string; content: string }[] = [];
+        if (typeof system === 'string') {
+          messages.push({ role: 'system', content: system });
+        }
+        if (typeof prompt === 'string') {
+          messages.push({ role: 'user', content: prompt });
+        }
+        return messages;
+      }
+    }
+    // eslint-disable-next-line no-empty
+  } catch {}
+  return undefined;
+}
+
+/**
+ * Derive a `gen_ai.request.messages` JSON array from the prompt attribute
+ * on invoke_agent spans.
+ */
+export function requestMessagesFromPrompt(span: Span, prompt: unknown): void {
+  if (typeof prompt !== 'string') return;
+  const maybeMessages = convertPromptToMessages(prompt);
+  if (maybeMessages !== undefined) {
+    span.setAttribute('gen_ai.request.messages', getTruncatedJsonString(maybeMessages));
+  }
+}
diff --git a/packages/core/test/lib/utils/vercelai-utils.test.ts b/packages/core/test/lib/utils/vercelai-utils.test.ts
new file mode 100644
index 000000000000..d0161a39664d
--- /dev/null
+++ b/packages/core/test/lib/utils/vercelai-utils.test.ts
@@ -0,0 +1,55 @@
+import { describe, expect, it } from 'vitest';
+import { convertPromptToMessages } from '../../../src/tracing/vercel-ai/utils';
+
+describe('vercel-ai-utils', () => {
+  describe('convertPromptToMessages', () => {
+    it('should convert a prompt with system to a messages array', () => {
+      expect(
+        convertPromptToMessages(
+          JSON.stringify({
+            system: 'You are a friendly robot',
+            prompt: 'Hello, robot',
+          }),
+        ),
+      ).toStrictEqual([
+        { role: 'system', content: 'You are a friendly robot' },
+        { role: 'user', content: 'Hello, robot' },
+      ]);
+    });
+
+    it('should convert a system prompt to a messages array', () => {
+      expect(
+        convertPromptToMessages(
+          JSON.stringify({
+            system: 'You are a friendly robot',
+          }),
+        ),
+      ).toStrictEqual([{ role: 'system', content: 'You are a friendly robot' }]);
+    });
+
+    it('should convert a user-only prompt to a messages array', () => {
+      expect(
+        convertPromptToMessages(
+          JSON.stringify({
+            prompt: 'Hello, robot',
+          }),
+        ),
+      ).toStrictEqual([{ role: 'user', content: 'Hello, robot' }]);
+    });
+
+    it('should ignore unexpected data', () => {
+      expect(
+        convertPromptToMessages(
+          JSON.stringify({
+            randomField: 'Hello, robot',
+            nothing: 'that we know how to handle',
+          }),
+        ),
+      ).toBe(undefined);
+    });
+
+    it('should not break on invalid JSON', () => {
+      expect(convertPromptToMessages('this is not json')).toBe(undefined);
+    });
+  });
+});
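
Note (outside the patch): an end-to-end usage sketch of the two new helpers, in TypeScript. The values mirror the unit tests and integration-test expectations above; the span stub is hypothetical and implements only the setAttribute surface that requestMessagesFromPrompt touches, with a cast standing in for a real Span.

    import { convertPromptToMessages, requestMessagesFromPrompt } from '../../../src/tracing/vercel-ai/utils';

    // Direct conversion, as exercised by the unit tests:
    convertPromptToMessages('{"system":"You are a friendly robot","prompt":"Hello, robot"}');
    // -> [{ role: 'system', content: 'You are a friendly robot' },
    //     { role: 'user', content: 'Hello, robot' }]

    // Via a span: the derived messages land on 'gen_ai.request.messages' as a JSON string.
    const recorded: Record<string, unknown> = {};
    const spanStub = {
      setAttribute(key: string, value: unknown): void {
        recorded[key] = value;
      },
    };
    type SpanArg = Parameters<typeof requestMessagesFromPrompt>[0];

    requestMessagesFromPrompt(spanStub as unknown as SpanArg, '{"prompt":"Where is the second span?"}');
    // recorded['gen_ai.request.messages'] === '[{"role":"user","content":"Where is the second span?"}]'

    // Non-string prompts and unparsable JSON set nothing:
    requestMessagesFromPrompt(spanStub as unknown as SpanArg, 42);
    requestMessagesFromPrompt(spanStub as unknown as SpanArg, 'this is not json');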