diff --git a/packages/traceloop-sdk/src/lib/tracing/ai-sdk-transformations.ts b/packages/traceloop-sdk/src/lib/tracing/ai-sdk-transformations.ts
index 8263eca8..f6f07645 100644
--- a/packages/traceloop-sdk/src/lib/tracing/ai-sdk-transformations.ts
+++ b/packages/traceloop-sdk/src/lib/tracing/ai-sdk-transformations.ts
@@ -1,15 +1,19 @@
-import { ReadableSpan } from "@opentelemetry/sdk-trace-node";
+import { ReadableSpan, Span } from "@opentelemetry/sdk-trace-node";
 import { SpanAttributes } from "@traceloop/ai-semantic-conventions";
 
+const AI_GENERATE_TEXT = "ai.generateText";
 const AI_GENERATE_TEXT_DO_GENERATE = "ai.generateText.doGenerate";
 const AI_GENERATE_OBJECT_DO_GENERATE = "ai.generateObject.doGenerate";
 const AI_STREAM_TEXT_DO_STREAM = "ai.streamText.doStream";
 
 const HANDLED_SPAN_NAMES: Record<string, string> = {
-  [AI_GENERATE_TEXT_DO_GENERATE]: "ai.generateText.generate",
-  [AI_GENERATE_OBJECT_DO_GENERATE]: "ai.generateObject.generate",
-  [AI_STREAM_TEXT_DO_STREAM]: "ai.streamText.stream",
+  [AI_GENERATE_TEXT]: "run.ai",
+  [AI_GENERATE_TEXT_DO_GENERATE]: "text.generate",
+  [AI_GENERATE_OBJECT_DO_GENERATE]: "object.generate",
+  [AI_STREAM_TEXT_DO_STREAM]: "text.stream",
 };
 
+const TOOL_SPAN_NAME = "ai.toolCall";
+
 const AI_RESPONSE_TEXT = "ai.response.text";
 const AI_RESPONSE_OBJECT = "ai.response.object";
 const AI_RESPONSE_TOOL_CALLS = "ai.response.toolCalls";
@@ -19,6 +23,7 @@ const AI_USAGE_PROMPT_TOKENS = "ai.usage.promptTokens";
 const AI_USAGE_COMPLETION_TOKENS = "ai.usage.completionTokens";
 const AI_MODEL_PROVIDER = "ai.model.provider";
 const AI_PROMPT_TOOLS = "ai.prompt.tools";
+const AI_TELEMETRY_METADATA_PREFIX = "ai.telemetry.metadata.";
 const TYPE_TEXT = "text";
 const TYPE_TOOL_CALL = "tool_call";
 const ROLE_ASSISTANT = "assistant";
@@ -47,14 +52,6 @@ const VENDOR_MAPPING: Record<string, string> = {
   openrouter: "OpenRouter",
 };
 
-export const transformAiSdkSpanName = (span: ReadableSpan): void => {
-  // Unfortunately, the span name is not writable as this is not the intended behavior
-  // but it is a workaround to set the correct span name
-  if (span.name in HANDLED_SPAN_NAMES) {
-    (span as any).name = HANDLED_SPAN_NAMES[span.name];
-  }
-};
-
 const transformResponseText = (attributes: Record<string, any>): void => {
   if (AI_RESPONSE_TEXT in attributes) {
     attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.content`] =
@@ -367,9 +364,41 @@ const transformVendor = (attributes: Record<string, any>): void => {
   }
 };
 
-export const transformAiSdkAttributes = (
-  attributes: Record<string, any>,
-): void => {
+const transformTelemetryMetadata = (attributes: Record<string, any>): void => {
+  const metadataAttributes: Record<string, string> = {};
+  const keysToDelete: string[] = [];
+
+  // Find all ai.telemetry.metadata.* attributes
+  for (const [key, value] of Object.entries(attributes)) {
+    if (key.startsWith(AI_TELEMETRY_METADATA_PREFIX)) {
+      const metadataKey = key.substring(AI_TELEMETRY_METADATA_PREFIX.length);
+
+      // Always mark for deletion since it's a telemetry metadata attribute
+      keysToDelete.push(key);
+
+      if (metadataKey && value != null) {
+        // Convert value to string for association properties
+        const stringValue = typeof value === "string" ? value : String(value);
+        metadataAttributes[metadataKey] = stringValue;
+
+        // Also set as traceloop association property attribute
+        attributes[
+          `${SpanAttributes.TRACELOOP_ASSOCIATION_PROPERTIES}.${metadataKey}`
+        ] = stringValue;
+      }
+    }
+  }
+
+  // Remove original ai.telemetry.metadata.* attributes
+  keysToDelete.forEach((key) => {
+    delete attributes[key];
+  });
+
+  // Note: Context setting for child span inheritance should be done before span creation,
+  // not during transformation. Use `withTelemetryMetadataContext` function for context propagation.
+};
+
+export const transformLLMSpans = (attributes: Record<string, any>): void => {
   transformResponseText(attributes);
   transformResponseObject(attributes);
   transformResponseToolCalls(attributes);
@@ -379,16 +408,40 @@ export const transformAiSdkAttributes = (
   transformCompletionTokens(attributes);
   calculateTotalTokens(attributes);
   transformVendor(attributes);
+  transformTelemetryMetadata(attributes);
+};
+
+const transformToolCalls = (span: ReadableSpan): void => {
+  if (
+    span.attributes["ai.toolCall.args"] &&
+    span.attributes["ai.toolCall.result"]
+  ) {
+    span.attributes[SpanAttributes.TRACELOOP_ENTITY_INPUT] =
+      span.attributes["ai.toolCall.args"];
+    delete span.attributes["ai.toolCall.args"];
+    span.attributes[SpanAttributes.TRACELOOP_ENTITY_OUTPUT] =
+      span.attributes["ai.toolCall.result"];
+    delete span.attributes["ai.toolCall.result"];
+  }
 };
 
 const shouldHandleSpan = (span: ReadableSpan): boolean => {
-  return span.name in HANDLED_SPAN_NAMES;
+  return span.instrumentationScope?.name === "ai";
+};
+
+export const transformAiSdkSpanNames = (span: Span): void => {
+  if (span.name === TOOL_SPAN_NAME) {
+    span.updateName(`${span.attributes["ai.toolCall.name"] as string}.tool`);
+  }
+  if (span.name in HANDLED_SPAN_NAMES) {
+    span.updateName(HANDLED_SPAN_NAMES[span.name]);
+  }
 };
 
-export const transformAiSdkSpan = (span: ReadableSpan): void => {
+export const transformAiSdkSpanAttributes = (span: ReadableSpan): void => {
   if (!shouldHandleSpan(span)) {
     return;
   }
-  transformAiSdkSpanName(span);
-  transformAiSdkAttributes(span.attributes);
+  transformLLMSpans(span.attributes);
+  transformToolCalls(span);
 };
diff --git a/packages/traceloop-sdk/src/lib/tracing/span-processor.ts b/packages/traceloop-sdk/src/lib/tracing/span-processor.ts
index 3215d019..6437797b 100644
--- a/packages/traceloop-sdk/src/lib/tracing/span-processor.ts
+++ b/packages/traceloop-sdk/src/lib/tracing/span-processor.ts
@@ -2,9 +2,10 @@ import {
   SimpleSpanProcessor,
   BatchSpanProcessor,
   SpanProcessor,
+  Span,
   ReadableSpan,
 } from "@opentelemetry/sdk-trace-node";
-import { Span, context } from "@opentelemetry/api";
+import { context } from "@opentelemetry/api";
 import { OTLPTraceExporter } from "@opentelemetry/exporter-trace-otlp-proto";
 import { SpanExporter } from "@opentelemetry/sdk-trace-base";
 import {
@@ -13,7 +14,10 @@ import {
   WORKFLOW_NAME_KEY,
 } from "./tracing";
 import { SpanAttributes } from "@traceloop/ai-semantic-conventions";
-import { transformAiSdkSpan } from "./ai-sdk-transformations";
+import {
+  transformAiSdkSpanAttributes,
+  transformAiSdkSpanNames,
+} from "./ai-sdk-transformations";
 import { parseKeyPairsIntoRecord } from "./baggage-utils";
 
 export const ALL_INSTRUMENTATION_LIBRARIES = "all" as const;
@@ -155,6 +159,8 @@ const onSpanStart = (span: Span): void => {
       );
     }
   }
+
+  transformAiSdkSpanNames(span);
 };
 
 /**
@@ -220,7 +226,7 @@ const onSpanEnd = (
   }
 
   // Apply AI SDK transformations (if needed)
-  transformAiSdkSpan(span);
+  
transformAiSdkSpanAttributes(span); // Ensure OTLP transformer compatibility const compatibleSpan = ensureSpanCompatibility(span); diff --git a/packages/traceloop-sdk/test/ai-sdk-integration.test.ts b/packages/traceloop-sdk/test/ai-sdk-integration.test.ts index 52cb29bb..4e0d3c04 100644 --- a/packages/traceloop-sdk/test/ai-sdk-integration.test.ts +++ b/packages/traceloop-sdk/test/ai-sdk-integration.test.ts @@ -105,17 +105,15 @@ describe("Test AI SDK Integration with Recording", function () { const spans = memoryExporter.getFinishedSpans(); const generateTextSpan = spans.find( - (span) => - span.name === "ai.generateText.generate" || - span.name === "ai.generateText.doGenerate", + (span) => span.name === "text.generate", ); assert.ok(result); assert.ok(result.text); assert.ok(generateTextSpan); - // Verify span name - assert.strictEqual(generateTextSpan.name, "ai.generateText.generate"); + // Verify span name (should be transformed from ai.generateText.doGenerate to text.generate) + assert.strictEqual(generateTextSpan.name, "text.generate"); // Verify vendor assert.strictEqual(generateTextSpan.attributes["gen_ai.system"], "OpenAI"); @@ -174,8 +172,7 @@ describe("Test AI SDK Integration with Recording", function () { // Find the Google span specifically (should have workflow name test_google_workflow) const generateTextSpan = spans.find( (span) => - (span.name === "ai.generateText.generate" || - span.name === "ai.generateText.doGenerate") && + span.name === "text.generate" && span.attributes["traceloop.workflow.name"] === "test_google_workflow", ); @@ -183,8 +180,8 @@ describe("Test AI SDK Integration with Recording", function () { assert.ok(result.text); assert.ok(generateTextSpan, "Could not find Google generateText span"); - // Verify span name - assert.strictEqual(generateTextSpan.name, "ai.generateText.generate"); + // Verify span name (should be transformed from ai.generateText.doGenerate to text.generate) + assert.strictEqual(generateTextSpan.name, "text.generate"); // Verify vendor assert.strictEqual(generateTextSpan.attributes["gen_ai.system"], "Google"); @@ -236,9 +233,7 @@ describe("Test AI SDK Integration with Recording", function () { assert.ok(result.text); const spans = memoryExporter.getFinishedSpans(); - const aiSdkSpan = spans.find((span) => - span.name.startsWith("ai.generateText"), - ); + const aiSdkSpan = spans.find((span) => span.name === "text.generate"); assert.ok(aiSdkSpan); diff --git a/packages/traceloop-sdk/test/ai-sdk-transformations.test.ts b/packages/traceloop-sdk/test/ai-sdk-transformations.test.ts index 8ec61e68..07ef9b37 100644 --- a/packages/traceloop-sdk/test/ai-sdk-transformations.test.ts +++ b/packages/traceloop-sdk/test/ai-sdk-transformations.test.ts @@ -1,9 +1,12 @@ import * as assert from "assert"; import { ReadableSpan } from "@opentelemetry/sdk-trace-node"; import { SpanAttributes } from "@traceloop/ai-semantic-conventions"; +import { context } from "@opentelemetry/api"; +import { ASSOCATION_PROPERTIES_KEY } from "../src/lib/tracing/tracing"; import { - transformAiSdkAttributes, - transformAiSdkSpan, + transformLLMSpans, + transformAiSdkSpanAttributes, + transformAiSdkSpanNames, } from "../src/lib/tracing/ai-sdk-transformations"; // Helper function to create a mock ReadableSpan @@ -14,9 +17,26 @@ const createMockSpan = ( return { name, attributes, + instrumentationScope: { name: "ai", version: "1.0.0" }, } as ReadableSpan; }; +// Helper function to create a mock span with updateName capability +const createMockSpanWithUpdate = ( + name: string, + 
attributes: Record = {}, +) => { + const span = { + name, + attributes, + instrumentationScope: { name: "ai", version: "1.0.0" }, + updateName: (newName: string) => { + span.name = newName; + }, + }; + return span as ReadableSpan & { updateName: (name: string) => void }; +}; + describe("AI SDK Transformations", () => { describe("transformAiSdkAttributes - response text", () => { it("should transform ai.response.text to completion attributes", () => { @@ -25,7 +45,7 @@ describe("AI SDK Transformations", () => { someOtherAttr: "value", }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); assert.strictEqual( attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.content`], @@ -45,7 +65,7 @@ describe("AI SDK Transformations", () => { }; const originalAttributes = { ...attributes }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); assert.deepStrictEqual(attributes, originalAttributes); }); @@ -55,7 +75,7 @@ describe("AI SDK Transformations", () => { "ai.response.text": "", }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); assert.strictEqual( attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.content`], @@ -76,7 +96,7 @@ describe("AI SDK Transformations", () => { someOtherAttr: "value", }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); assert.strictEqual( attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.content`], @@ -96,7 +116,7 @@ describe("AI SDK Transformations", () => { }; const originalAttributes = { ...attributes }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); assert.deepStrictEqual(attributes, originalAttributes); }); @@ -124,7 +144,7 @@ describe("AI SDK Transformations", () => { someOtherAttr: "value", }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); // Check that role is set assert.strictEqual( @@ -167,7 +187,7 @@ describe("AI SDK Transformations", () => { }; const originalAttributes = { ...attributes }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); assert.deepStrictEqual(attributes, originalAttributes); }); @@ -178,7 +198,7 @@ describe("AI SDK Transformations", () => { someOtherAttr: "value", }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); // Should not modify attributes when JSON parsing fails assert.strictEqual(attributes["ai.response.toolCalls"], "invalid json {"); @@ -196,7 +216,7 @@ describe("AI SDK Transformations", () => { "ai.prompt.messages": JSON.stringify(messages), }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); assert.strictEqual( attributes[`${SpanAttributes.LLM_PROMPTS}.0.content`], @@ -228,7 +248,7 @@ describe("AI SDK Transformations", () => { "ai.prompt.messages": JSON.stringify(messages), }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); assert.strictEqual( attributes[`${SpanAttributes.LLM_PROMPTS}.0.content`], @@ -257,7 +277,7 @@ describe("AI SDK Transformations", () => { "ai.prompt.messages": JSON.stringify(messages), }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); assert.strictEqual( attributes[`${SpanAttributes.LLM_PROMPTS}.0.content`], @@ -284,7 +304,7 @@ describe("AI SDK Transformations", () => { "ai.prompt.messages": JSON.stringify(messages), }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); assert.strictEqual( attributes[`${SpanAttributes.LLM_PROMPTS}.0.content`], @@ -308,7 +328,7 @@ describe("AI SDK Transformations", () => { "ai.prompt.messages": 
JSON.stringify(messages), }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); assert.strictEqual( attributes[`${SpanAttributes.LLM_PROMPTS}.0.content`], @@ -332,7 +352,7 @@ describe("AI SDK Transformations", () => { "ai.prompt.messages": JSON.stringify(messages), }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); // Should preserve the original JSON since it's not simple text assert.strictEqual( @@ -357,7 +377,7 @@ describe("AI SDK Transformations", () => { "ai.prompt.messages": JSON.stringify(messages), }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); // Should preserve the original JSON since it has mixed content assert.strictEqual( @@ -376,7 +396,7 @@ describe("AI SDK Transformations", () => { someOtherAttr: "value", }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); // Should not modify attributes when JSON parsing fails assert.strictEqual(attributes["ai.prompt.messages"], "invalid json {"); @@ -389,7 +409,7 @@ describe("AI SDK Transformations", () => { }; const originalAttributes = { ...attributes }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); assert.deepStrictEqual(attributes, originalAttributes); }); @@ -399,7 +419,7 @@ describe("AI SDK Transformations", () => { "ai.prompt.messages": JSON.stringify([]), }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); assert.strictEqual(attributes["ai.prompt.messages"], undefined); }); @@ -410,7 +430,7 @@ describe("AI SDK Transformations", () => { '[{"role":"user","content":[{"type":"text","text":"Help me plan a trip to San Francisco. I\'d like to know:\\n1. What\'s the weather like there?\\n2. Find some good restaurants to try\\n3. If I\'m traveling from New York, how far is it?\\n\\nPlease use the available tools to get current information and provide a comprehensive travel guide."}]}]', }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); const result = attributes[`${SpanAttributes.LLM_PROMPTS}.0.content`]; @@ -437,7 +457,7 @@ describe("AI SDK Transformations", () => { someOtherAttr: "value", }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); assert.strictEqual( attributes[`${SpanAttributes.LLM_PROMPTS}.0.content`], @@ -457,7 +477,7 @@ describe("AI SDK Transformations", () => { }; const originalAttributes = { ...attributes }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); assert.deepStrictEqual(attributes, originalAttributes); }); @@ -468,7 +488,7 @@ describe("AI SDK Transformations", () => { someOtherAttr: "value", }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); // Should not modify attributes when JSON parsing fails assert.strictEqual(attributes["ai.prompt"], "invalid json {"); @@ -509,7 +529,7 @@ describe("AI SDK Transformations", () => { someOtherAttr: "value", }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); assert.strictEqual( attributes[`${SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.name`], @@ -577,7 +597,7 @@ describe("AI SDK Transformations", () => { ], }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); // Tool 0: only has name assert.strictEqual( @@ -622,7 +642,7 @@ describe("AI SDK Transformations", () => { someOtherAttr: "value", }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); // Should not create any function attributes assert.strictEqual( @@ -641,7 +661,7 @@ describe("AI SDK Transformations", () => { 
someOtherAttr: "value", }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); // Should not create any function attributes assert.strictEqual( @@ -659,7 +679,7 @@ describe("AI SDK Transformations", () => { someOtherAttr: "value", }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); assert.strictEqual(attributes.someOtherAttr, "value"); assert.strictEqual( @@ -673,7 +693,7 @@ describe("AI SDK Transformations", () => { "ai.prompt.tools": [null, undefined, {}, { name: "validTool" }], }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); // Only the valid tool should create attributes assert.strictEqual( @@ -705,7 +725,7 @@ describe("AI SDK Transformations", () => { ], }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); // Should parse and transform the first tool assert.strictEqual( @@ -745,7 +765,7 @@ describe("AI SDK Transformations", () => { ], }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); assert.strictEqual( attributes[`${SpanAttributes.LLM_REQUEST_FUNCTIONS}.0.name`], @@ -773,7 +793,7 @@ describe("AI SDK Transformations", () => { someOtherAttr: "value", }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); assert.strictEqual( attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS], @@ -789,7 +809,7 @@ describe("AI SDK Transformations", () => { }; const originalAttributes = { ...attributes }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); assert.deepStrictEqual(attributes, originalAttributes); }); @@ -799,7 +819,7 @@ describe("AI SDK Transformations", () => { "ai.usage.promptTokens": 0, }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); assert.strictEqual(attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS], 0); }); @@ -812,7 +832,7 @@ describe("AI SDK Transformations", () => { someOtherAttr: "value", }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); assert.strictEqual( attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS], @@ -828,7 +848,7 @@ describe("AI SDK Transformations", () => { }; const originalAttributes = { ...attributes }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); assert.deepStrictEqual(attributes, originalAttributes); }); @@ -838,7 +858,7 @@ describe("AI SDK Transformations", () => { "ai.usage.completionTokens": 0, }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); assert.strictEqual( attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS], @@ -854,7 +874,7 @@ describe("AI SDK Transformations", () => { [SpanAttributes.LLM_USAGE_COMPLETION_TOKENS]: 25, }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); assert.strictEqual(attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS], 75); }); @@ -865,7 +885,7 @@ describe("AI SDK Transformations", () => { [SpanAttributes.LLM_USAGE_COMPLETION_TOKENS]: "25", }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); assert.strictEqual(attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS], 75); }); @@ -875,7 +895,7 @@ describe("AI SDK Transformations", () => { [SpanAttributes.LLM_USAGE_COMPLETION_TOKENS]: 25, }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); assert.strictEqual( attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS], @@ -888,7 +908,7 @@ describe("AI SDK Transformations", () => { [SpanAttributes.LLM_USAGE_PROMPT_TOKENS]: 50, }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); 
assert.strictEqual( attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS], @@ -899,7 +919,7 @@ describe("AI SDK Transformations", () => { it("should not calculate total when both tokens are missing", () => { const attributes = {}; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); assert.strictEqual( attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS], @@ -915,7 +935,7 @@ describe("AI SDK Transformations", () => { someOtherAttr: "value", }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); assert.strictEqual(attributes[SpanAttributes.LLM_SYSTEM], "OpenAI"); assert.strictEqual(attributes["ai.model.provider"], undefined); @@ -934,7 +954,7 @@ describe("AI SDK Transformations", () => { "ai.model.provider": provider, }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); assert.strictEqual(attributes[SpanAttributes.LLM_SYSTEM], "OpenAI"); assert.strictEqual(attributes["ai.model.provider"], undefined); @@ -949,7 +969,7 @@ describe("AI SDK Transformations", () => { "ai.model.provider": provider, }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); assert.strictEqual(attributes[SpanAttributes.LLM_SYSTEM], "Azure"); assert.strictEqual(attributes["ai.model.provider"], undefined); @@ -961,7 +981,7 @@ describe("AI SDK Transformations", () => { "ai.model.provider": "anthropic", }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); assert.strictEqual(attributes[SpanAttributes.LLM_SYSTEM], "Anthropic"); assert.strictEqual(attributes["ai.model.provider"], undefined); @@ -973,7 +993,7 @@ describe("AI SDK Transformations", () => { }; const originalAttributes = { ...attributes }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); assert.deepStrictEqual(attributes, originalAttributes); }); @@ -983,7 +1003,7 @@ describe("AI SDK Transformations", () => { "ai.model.provider": "", }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); assert.strictEqual(attributes[SpanAttributes.LLM_SYSTEM], ""); assert.strictEqual(attributes["ai.model.provider"], undefined); @@ -1001,7 +1021,7 @@ describe("AI SDK Transformations", () => { someOtherAttr: "value", }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); // Check response text transformation assert.strictEqual( @@ -1054,7 +1074,7 @@ describe("AI SDK Transformations", () => { someOtherAttr: "value", }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); assert.strictEqual( attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.content`], @@ -1073,7 +1093,7 @@ describe("AI SDK Transformations", () => { someOtherAttr: "value", }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); // Check response object transformation assert.strictEqual( @@ -1141,7 +1161,7 @@ describe("AI SDK Transformations", () => { someOtherAttr: "value", }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); // Check tools transformation assert.strictEqual( @@ -1192,7 +1212,7 @@ describe("AI SDK Transformations", () => { "ai.prompt.messages": JSON.stringify(messages), }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); // Check that gen_ai.input.messages is properly set assert.strictEqual( @@ -1239,7 +1259,7 @@ describe("AI SDK Transformations", () => { "ai.response.text": "I'd be happy to help you with that!", }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); // Check that gen_ai.output.messages is properly set 
assert.strictEqual( @@ -1280,7 +1300,7 @@ describe("AI SDK Transformations", () => { "ai.response.toolCalls": JSON.stringify(toolCallsData), }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); // Check that gen_ai.output.messages is properly set assert.strictEqual( @@ -1379,7 +1399,7 @@ describe("AI SDK Transformations", () => { ], }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); // Check input messages assert.strictEqual( @@ -1448,7 +1468,7 @@ describe("AI SDK Transformations", () => { "ai.response.object": JSON.stringify(objectResponse), }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); // Check that gen_ai.output.messages is properly set assert.strictEqual( @@ -1497,7 +1517,7 @@ describe("AI SDK Transformations", () => { "ai.prompt.messages": JSON.stringify(complexMessages), }; - transformAiSdkAttributes(attributes); + transformLLMSpans(attributes); // Check input messages transformation const inputMessages = JSON.parse( @@ -1535,126 +1555,195 @@ describe("AI SDK Transformations", () => { }); }); - describe("transformAiSdkSpan", () => { - it("should transform both span name and attributes", () => { - const span = createMockSpan("ai.generateText.doGenerate", { + describe("transformAiSdkAttributes - telemetry metadata", () => { + it("should transform ai.telemetry.metadata.* attributes to association properties", () => { + const attributes = { + "ai.telemetry.metadata.userId": "user_123", + "ai.telemetry.metadata.sessionId": "session_456", + "ai.telemetry.metadata.experimentId": "exp_789", "ai.response.text": "Hello!", - "ai.usage.promptTokens": 10, - "ai.usage.completionTokens": 5, - }); + someOtherAttr: "value", + }; + + transformLLMSpans(attributes); - transformAiSdkSpan(span); + // Check that association properties are created + assert.strictEqual( + attributes[`${SpanAttributes.TRACELOOP_ASSOCIATION_PROPERTIES}.userId`], + "user_123", + ); + assert.strictEqual( + attributes[ + `${SpanAttributes.TRACELOOP_ASSOCIATION_PROPERTIES}.sessionId` + ], + "session_456", + ); + assert.strictEqual( + attributes[ + `${SpanAttributes.TRACELOOP_ASSOCIATION_PROPERTIES}.experimentId` + ], + "exp_789", + ); - // Check span name transformation - assert.strictEqual(span.name, "ai.generateText.generate"); + // Check that original metadata attributes are removed + assert.strictEqual(attributes["ai.telemetry.metadata.userId"], undefined); + assert.strictEqual( + attributes["ai.telemetry.metadata.sessionId"], + undefined, + ); + assert.strictEqual( + attributes["ai.telemetry.metadata.experimentId"], + undefined, + ); - // Check attribute transformations + // Check that other transformations still work assert.strictEqual( - span.attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.content`], + attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.content`], "Hello!", ); + assert.strictEqual(attributes.someOtherAttr, "value"); + }); + + it("should handle non-string metadata values by converting to strings", () => { + const attributes = { + "ai.telemetry.metadata.userId": 12345, + "ai.telemetry.metadata.isActive": true, + "ai.telemetry.metadata.score": 98.5, + "ai.telemetry.metadata.config": { key: "value" }, + }; + + transformLLMSpans(attributes); + assert.strictEqual( - span.attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS], - 10, + attributes[`${SpanAttributes.TRACELOOP_ASSOCIATION_PROPERTIES}.userId`], + "12345", ); assert.strictEqual( - span.attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS], - 5, + attributes[ + 
`${SpanAttributes.TRACELOOP_ASSOCIATION_PROPERTIES}.isActive` + ], + "true", + ); + assert.strictEqual( + attributes[`${SpanAttributes.TRACELOOP_ASSOCIATION_PROPERTIES}.score`], + "98.5", ); assert.strictEqual( - span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS], - 15, + attributes[`${SpanAttributes.TRACELOOP_ASSOCIATION_PROPERTIES}.config`], + "[object Object]", ); }); - it("should transform generateObject span name and attributes", () => { - const span = createMockSpan("ai.generateObject.doGenerate", { - "ai.prompt.format": "prompt", - "llm.usage.output_tokens": "39", - "traceloop.workflow.name": "generate_person_profile", - "llm.request.model": "gpt-4o", - "ai.settings.maxRetries": "2", - "ai.usage.promptTokens": "108", - "operation.name": "ai.generateObject.doGenerate", - "llm.response.id": "chatcmpl-C82mjzq1hNM753oc4VkStnjEzzLpk", - "ai.response.providerMetadata": - '{"openai":{"reasoningTokens":0,"acceptedPredictionTokens":0,"rejectedPredictionTokens":0,"cachedPromptTokens":0}}', - "ai.operationId": "ai.generateObject.doGenerate", - "ai.response.id": "chatcmpl-C82mjzq1hNM753oc4VkStnjEzzLpk", - "ai.usage.completionTokens": "39", - "ai.response.model": "gpt-4o-2024-08-06", - "ai.response.object": - '{"name":"Alex Dupont","age":30,"occupation":"Software Engineer","skills":["AI","Machine Learning","Programming","Multilingual"],"location":{"city":"Paris","country":"France"}}', - "ai.prompt.messages": - '[{"role":"user","content":[{"type":"text","text":"Based on this description, generate a detailed person profile: A talented software engineer from Paris who loves working with AI and machine learning, speaks multiple languages, and enjoys traveling."}]}]', - "ai.settings.mode": "tool", - "llm.vendor": "openai.chat", - "ai.response.timestamp": "2025-08-24T11:02:45.000Z", - "llm.response.model": "gpt-4o-2024-08-06", - "ai.model.id": "gpt-4o", - "ai.response.finishReason": "stop", - "ai.model.provider": "openai.chat", - "llm.usage.input_tokens": "108", - }); - - transformAiSdkSpan(span); + it("should ignore metadata attributes with null or undefined values", () => { + const attributes = { + "ai.telemetry.metadata.validKey": "valid_value", + "ai.telemetry.metadata.nullKey": null, + "ai.telemetry.metadata.undefinedKey": undefined, + "ai.telemetry.metadata.emptyKey": "", + someOtherAttr: "value", + }; - // Check span name transformation - assert.strictEqual(span.name, "ai.generateObject.generate"); + transformLLMSpans(attributes); - // Check attribute transformations + // Valid key should be processed assert.strictEqual( - span.attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.content`], - '{"name":"Alex Dupont","age":30,"occupation":"Software Engineer","skills":["AI","Machine Learning","Programming","Multilingual"],"location":{"city":"Paris","country":"France"}}', + attributes[ + `${SpanAttributes.TRACELOOP_ASSOCIATION_PROPERTIES}.validKey` + ], + "valid_value", ); + + // Empty string should be processed (it's a valid value) assert.strictEqual( - span.attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.role`], - "assistant", + attributes[ + `${SpanAttributes.TRACELOOP_ASSOCIATION_PROPERTIES}.emptyKey` + ], + "", ); + + // Null and undefined should not create association properties assert.strictEqual( - span.attributes[`${SpanAttributes.LLM_PROMPTS}.0.content`], - "Based on this description, generate a detailed person profile: A talented software engineer from Paris who loves working with AI and machine learning, speaks multiple languages, and enjoys traveling.", + attributes[ + 
`${SpanAttributes.TRACELOOP_ASSOCIATION_PROPERTIES}.nullKey` + ], + undefined, ); assert.strictEqual( - span.attributes[`${SpanAttributes.LLM_PROMPTS}.0.role`], - "user", + attributes[ + `${SpanAttributes.TRACELOOP_ASSOCIATION_PROPERTIES}.undefinedKey` + ], + undefined, ); + + // Original attributes should be removed only for processed ones assert.strictEqual( - span.attributes[SpanAttributes.LLM_USAGE_PROMPT_TOKENS], - "108", + attributes["ai.telemetry.metadata.validKey"], + undefined, ); assert.strictEqual( - span.attributes[SpanAttributes.LLM_USAGE_COMPLETION_TOKENS], - "39", + attributes["ai.telemetry.metadata.emptyKey"], + undefined, ); assert.strictEqual( - span.attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS], - 147, + attributes["ai.telemetry.metadata.nullKey"], + undefined, ); - assert.strictEqual(span.attributes[SpanAttributes.LLM_SYSTEM], "OpenAI"); - - // Check that original AI SDK attributes are removed - assert.strictEqual(span.attributes["ai.response.object"], undefined); - assert.strictEqual(span.attributes["ai.prompt.messages"], undefined); - assert.strictEqual(span.attributes["ai.usage.promptTokens"], undefined); assert.strictEqual( - span.attributes["ai.usage.completionTokens"], + attributes["ai.telemetry.metadata.undefinedKey"], undefined, ); - assert.strictEqual(span.attributes["ai.model.provider"], undefined); + + assert.strictEqual(attributes.someOtherAttr, "value"); }); - it("should handle spans with no transformations needed", () => { - const span = createMockSpan("some.other.span", { - someAttr: "value", - }); - const originalName = span.name; - const originalAttributes = { ...span.attributes }; + it("should work with other transformations in a complete AI SDK call", () => { + const attributes = { + "ai.telemetry.metadata.userId": "user_456", + "ai.telemetry.metadata.sessionId": "session_789", + "ai.response.text": "I'll help you with that!", + "ai.prompt.messages": JSON.stringify([ + { role: "user", content: "Help me" }, + ]), + "ai.usage.promptTokens": 5, + "ai.usage.completionTokens": 10, + "ai.model.provider": "openai.chat", + }; + + transformLLMSpans(attributes); - transformAiSdkSpan(span); + // Check metadata transformation + assert.strictEqual( + attributes[`${SpanAttributes.TRACELOOP_ASSOCIATION_PROPERTIES}.userId`], + "user_456", + ); + assert.strictEqual( + attributes[ + `${SpanAttributes.TRACELOOP_ASSOCIATION_PROPERTIES}.sessionId` + ], + "session_789", + ); + + // Check other transformations still work + assert.strictEqual( + attributes[`${SpanAttributes.LLM_COMPLETIONS}.0.content`], + "I'll help you with that!", + ); + assert.strictEqual( + attributes[`${SpanAttributes.LLM_PROMPTS}.0.content`], + "Help me", + ); + assert.strictEqual(attributes[SpanAttributes.LLM_USAGE_TOTAL_TOKENS], 15); + assert.strictEqual(attributes[SpanAttributes.LLM_SYSTEM], "OpenAI"); - assert.strictEqual(span.name, originalName); - assert.deepStrictEqual(span.attributes, originalAttributes); + // Check original attributes are removed + assert.strictEqual(attributes["ai.telemetry.metadata.userId"], undefined); + assert.strictEqual( + attributes["ai.telemetry.metadata.sessionId"], + undefined, + ); + assert.strictEqual(attributes["ai.response.text"], undefined); + assert.strictEqual(attributes["ai.prompt.messages"], undefined); + assert.strictEqual(attributes["ai.model.provider"], undefined); }); }); }); diff --git a/packages/traceloop-sdk/test/decorators.test.ts b/packages/traceloop-sdk/test/decorators.test.ts index ff5da6d7..8d1cc4ca 100644 --- 
a/packages/traceloop-sdk/test/decorators.test.ts +++ b/packages/traceloop-sdk/test/decorators.test.ts @@ -647,7 +647,7 @@ describe("Test SDK Decorators", () => { const spans = memoryExporter.getFinishedSpans(); const generateTextSpan = spans.find( - (span) => span.name === "ai.generateText.generate", + (span) => span.name === "text.generate", ); assert.ok(result);
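
Reviewer note (not part of the diff): a minimal usage sketch of how metadata is expected to flow through these changes. It assumes the usual @traceloop/node-server-sdk initialization and the Vercel AI SDK's experimental_telemetry option; the app name, model, and metadata keys below are illustrative only.

// usage-sketch.ts — illustrative, not included in this PR
import * as traceloop from "@traceloop/node-server-sdk";
import { generateText } from "ai";
import { openai } from "@ai-sdk/openai";

// Assumed initialization; exact options depend on your setup (API key via env, etc.).
traceloop.initialize({ appName: "ai-sdk-example", disableBatch: true });

async function main() {
  // The AI SDK records each metadata entry as an `ai.telemetry.metadata.<key>`
  // span attribute. With this change, transformTelemetryMetadata rewrites those
  // into `traceloop.association.properties.<key>` and deletes the originals,
  // while transformAiSdkSpanNames renames e.g. `ai.generateText.doGenerate`
  // spans to `text.generate` at span start.
  const { text } = await generateText({
    model: openai("gpt-4o-mini"),
    prompt: "Say hello",
    experimental_telemetry: {
      isEnabled: true,
      metadata: { userId: "user_123", sessionId: "session_456" },
    },
  });
  console.log(text);
}

main().catch(console.error);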