Commit 55254c6

fix(ai, llmobs): respect the functionId argument for vercel-ai operations (#6524)
* fix for ai apm spans to respect functionId as resource name
* add functionId to llmobs span name
Parent: e71f693

5 files changed (+104, −6 lines)
packages/datadog-plugin-ai/src/tracing.js

Lines changed: 3 additions & 3 deletions

@@ -8,14 +8,14 @@ class VercelAITracingPlugin extends TracingPlugin {
   static prefix = 'tracing:dd-trace:vercel-ai'
 
   bindStart (ctx) {
-    const attributes = ctx.attributes
+    const { attributes, name } = ctx
 
     const model = attributes['ai.model.id']
     const modelProvider = getModelProvider(attributes)
 
-    this.startSpan(ctx.name, {
+    this.startSpan(name, {
       meta: {
-        'resource.name': ctx.name,
+        'resource.name': attributes['resource.name'] ?? name,
         'ai.request.model': model,
         'ai.request.model_provider': modelProvider
       }
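The fallback above relies on the Vercel AI SDK forwarding the caller's functionId as the 'resource.name' telemetry attribute; when it is absent, the span's resource falls back to the operation name. A rough illustration (the helper name is ours, not the plugin's):

// Illustrative only: mirrors the 'resource.name' resolution in bindStart above.
function resolveResourceName (attributes, operationName) {
  return attributes['resource.name'] ?? operationName
}

resolveResourceName({ 'resource.name': 'test' }, 'ai.generateText') // => 'test'
resolveResourceName({}, 'ai.generateText')                          // => 'ai.generateText'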

packages/datadog-plugin-ai/test/index.spec.js

Lines changed: 32 additions & 0 deletions

@@ -451,5 +451,37 @@ describe('Plugin', () => {
 
         await checkTraces
       })
+
+      it('creates a span that respects the functionId', async () => {
+        const checkTraces = agent.assertSomeTraces(traces => {
+          const generateTextSpan = traces[0][0]
+          const doGenerateSpan = traces[0][1]
+
+          assert.strictEqual(generateTextSpan.name, 'ai.generateText')
+          assert.strictEqual(generateTextSpan.resource, 'test')
+          assert.strictEqual(generateTextSpan.meta['ai.request.model'], 'gpt-4o-mini')
+          assert.strictEqual(generateTextSpan.meta['ai.request.model_provider'], 'openai')
+
+          assert.strictEqual(doGenerateSpan.name, 'ai.generateText.doGenerate')
+          assert.strictEqual(doGenerateSpan.resource, 'test')
+          assert.strictEqual(doGenerateSpan.meta['ai.request.model'], 'gpt-4o-mini')
+          assert.strictEqual(doGenerateSpan.meta['ai.request.model_provider'], 'openai')
+        })
+
+        const result = await ai.generateText({
+          model: openai('gpt-4o-mini'),
+          system: 'You are a helpful assistant',
+          prompt: 'Hello, OpenAI!',
+          maxTokens: 100,
+          temperature: 0.5,
+          experimental_telemetry: {
+            functionId: 'test'
+          }
+        })
+
+        assert.ok(result.text, 'Expected result to be truthy')
+
+        await checkTraces
+      })
     })
   })

packages/dd-trace/src/llmobs/plugins/ai/index.js

Lines changed: 3 additions & 2 deletions

@@ -17,7 +17,8 @@ const {
   getModelMetadata,
   getGenerationMetadata,
   getToolNameFromTags,
-  getToolCallResultContent
+  getToolCallResultContent,
+  getLlmObsSpanName
 } = require('./util')
 
 const SPAN_NAME_TO_KIND_MAPPING = {
@@ -96,7 +97,7 @@ class VercelAILLMObsPlugin extends BaseLLMObsPlugin {
     const kind = SPAN_NAME_TO_KIND_MAPPING[operation]
     if (!kind) return
 
-    return { kind, name: operation }
+    return { kind, name: getLlmObsSpanName(operation, ctx.attributes['ai.telemetry.functionId']) }
   }
 
   setLLMObsTags (ctx) {

packages/dd-trace/src/llmobs/plugins/ai/util.js

Lines changed: 12 additions & 1 deletion

@@ -167,6 +167,16 @@ function getToolCallResultContent (content) {
   }
 }
 
+/**
+ * Computes the LLM Observability `ai` span name
+ * @param {string} operation
+ * @param {string} functionId
+ * @returns {string}
+ */
+function getLlmObsSpanName (operation, functionId) {
+  return functionId ? `${functionId}.${operation}` : operation
+}
+
 module.exports = {
   getSpanTags,
   getOperation,
@@ -175,5 +185,6 @@ module.exports = {
   getModelMetadata,
   getGenerationMetadata,
   getToolNameFromTags,
-  getToolCallResultContent
+  getToolCallResultContent,
+  getLlmObsSpanName
 }
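Usage of the new helper, as exercised by the plugin change above (values mirror the tests in this commit):

const { getLlmObsSpanName } = require('./util')

getLlmObsSpanName('generateText', 'test') // => 'test.generateText'
getLlmObsSpanName('doGenerate', 'test')   // => 'test.doGenerate'
getLlmObsSpanName('generateText')         // => 'generateText' (no functionId supplied)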

packages/dd-trace/test/llmobs/plugins/ai/index.spec.js

Lines changed: 54 additions & 0 deletions

@@ -659,5 +659,59 @@ describe('Plugin', () => {
       expect(toolCallSpan).to.deepEqualWithMockValues(expectedToolCallSpan)
       expect(llmSpan2).to.deepEqualWithMockValues(expectedLlmSpan2)
     })
+
+    it('creates a span that respects the functionId', async () => {
+      await ai.generateText({
+        model: openai('gpt-4o-mini'),
+        system: 'You are a helpful assistant',
+        prompt: 'Hello, OpenAI!',
+        maxTokens: 100,
+        temperature: 0.5,
+        experimental_telemetry: {
+          functionId: 'test'
+        }
+      })
+
+      const { apmSpans, llmobsSpans } = await getEvents()
+
+      const expectedWorkflowSpan = expectedLLMObsNonLLMSpanEvent({
+        span: apmSpans[0],
+        name: 'test.generateText',
+        spanKind: 'workflow',
+        inputValue: 'Hello, OpenAI!',
+        outputValue: MOCK_STRING,
+        metadata: {
+          maxTokens: 100,
+          temperature: 0.5,
+          maxSteps: MOCK_NUMBER,
+          maxRetries: MOCK_NUMBER,
+        },
+        tokenMetrics: { input_tokens: MOCK_NUMBER, output_tokens: MOCK_NUMBER, total_tokens: MOCK_NUMBER },
+        tags: { ml_app: 'test', language: 'javascript', integration: 'ai' },
+      })
+
+      const expectedLlmSpan = expectedLLMObsLLMSpanEvent({
+        span: apmSpans[1],
+        parentId: llmobsSpans[0].span_id,
+        spanKind: 'llm',
+        modelName: 'gpt-4o-mini',
+        modelProvider: 'openai',
+        name: 'test.doGenerate',
+        inputMessages: [
+          { content: 'You are a helpful assistant', role: 'system' },
+          { content: 'Hello, OpenAI!', role: 'user' }
+        ],
+        outputMessages: [{ content: MOCK_STRING, role: 'assistant' }],
+        metadata: {
+          max_tokens: 100,
+          temperature: 0.5,
+        },
+        tokenMetrics: { input_tokens: MOCK_NUMBER, output_tokens: MOCK_NUMBER, total_tokens: MOCK_NUMBER },
+        tags: { ml_app: 'test', language: 'javascript', integration: 'ai' },
+      })
+
+      expect(llmobsSpans[0]).to.deepEqualWithMockValues(expectedWorkflowSpan)
+      expect(llmobsSpans[1]).to.deepEqualWithMockValues(expectedLlmSpan)
+    })
   })
 })
