Skip to content

Commit 66f4025

Browse files
committed
fix(tracing): add gen_ai.request.messages.original_length attributes (#18608)
Whenever the `gen_ai.request.messages` attribute is potentially truncated, add a `gen_ai.request.messages.original_length` attribute indicating the original length of the messages array, so that any truncation is evident. Closes JS-1350
1 parent 1307651 commit 66f4025

File tree

13 files changed

+100
-20
lines changed

13 files changed

+100
-20
lines changed

dev-packages/node-integration-tests/suites/tracing/openai/openai-tool-calls/test.ts

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -182,6 +182,7 @@ describe('OpenAI Tool Calls integration', () => {
182182
'sentry.origin': 'auto.ai.openai',
183183
'gen_ai.system': 'openai',
184184
'gen_ai.request.model': 'gpt-4',
185+
'gen_ai.request.messages.original_length': 1,
185186
'gen_ai.request.messages': '[{"role":"user","content":"What is the weather like in Paris today?"}]',
186187
'gen_ai.request.available_tools': WEATHER_TOOL_DEFINITION,
187188
'gen_ai.response.model': 'gpt-4',
@@ -212,6 +213,7 @@ describe('OpenAI Tool Calls integration', () => {
212213
'gen_ai.system': 'openai',
213214
'gen_ai.request.model': 'gpt-4',
214215
'gen_ai.request.stream': true,
216+
'gen_ai.request.messages.original_length': 1,
215217
'gen_ai.request.messages': '[{"role":"user","content":"What is the weather like in Paris today?"}]',
216218
'gen_ai.request.available_tools': WEATHER_TOOL_DEFINITION,
217219
'gen_ai.response.model': 'gpt-4',
@@ -241,6 +243,7 @@ describe('OpenAI Tool Calls integration', () => {
241243
'sentry.origin': 'auto.ai.openai',
242244
'gen_ai.system': 'openai',
243245
'gen_ai.request.model': 'gpt-4',
246+
'gen_ai.request.messages.original_length': 1,
244247
'gen_ai.request.messages': '[{"role":"user","content":"What is the weather like in Paris today?"}]',
245248
'gen_ai.request.available_tools': WEATHER_TOOL_DEFINITION,
246249
'gen_ai.response.model': 'gpt-4',
@@ -270,6 +273,7 @@ describe('OpenAI Tool Calls integration', () => {
270273
'gen_ai.system': 'openai',
271274
'gen_ai.request.model': 'gpt-4',
272275
'gen_ai.request.stream': true,
276+
'gen_ai.request.messages.original_length': 1,
273277
'gen_ai.request.messages': '[{"role":"user","content":"What is the weather like in Paris today?"}]',
274278
'gen_ai.request.available_tools': WEATHER_TOOL_DEFINITION,
275279
'gen_ai.response.model': 'gpt-4',

dev-packages/node-integration-tests/suites/tracing/openai/test.ts

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -159,6 +159,7 @@ describe('OpenAI integration', () => {
159159
'gen_ai.system': 'openai',
160160
'gen_ai.request.model': 'gpt-3.5-turbo',
161161
'gen_ai.request.temperature': 0.7,
162+
'gen_ai.request.messages.original_length': 2,
162163
'gen_ai.request.messages':
163164
'[{"role":"system","content":"You are a helpful assistant."},{"role":"user","content":"What is the capital of France?"}]',
164165
'gen_ai.response.model': 'gpt-3.5-turbo',
@@ -214,6 +215,7 @@ describe('OpenAI integration', () => {
214215
'sentry.origin': 'auto.ai.openai',
215216
'gen_ai.system': 'openai',
216217
'gen_ai.request.model': 'error-model',
218+
'gen_ai.request.messages.original_length': 1,
217219
'gen_ai.request.messages': '[{"role":"user","content":"This will fail"}]',
218220
},
219221
description: 'chat error-model',
@@ -231,6 +233,7 @@ describe('OpenAI integration', () => {
231233
'gen_ai.request.model': 'gpt-4',
232234
'gen_ai.request.temperature': 0.8,
233235
'gen_ai.request.stream': true,
236+
'gen_ai.request.messages.original_length': 2,
234237
'gen_ai.request.messages':
235238
'[{"role":"system","content":"You are a helpful assistant."},{"role":"user","content":"Tell me about streaming"}]',
236239
'gen_ai.response.text': 'Hello from OpenAI streaming!',
@@ -287,6 +290,7 @@ describe('OpenAI integration', () => {
287290
'gen_ai.operation.name': 'chat',
288291
'gen_ai.request.model': 'error-model',
289292
'gen_ai.request.stream': true,
293+
'gen_ai.request.messages.original_length': 1,
290294
'gen_ai.request.messages': '[{"role":"user","content":"This will fail"}]',
291295
'gen_ai.system': 'openai',
292296
'sentry.op': 'gen_ai.chat',

dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -159,6 +159,7 @@ describe('OpenAI integration (V6)', () => {
159159
'gen_ai.system': 'openai',
160160
'gen_ai.request.model': 'gpt-3.5-turbo',
161161
'gen_ai.request.temperature': 0.7,
162+
'gen_ai.request.messages.original_length': 2,
162163
'gen_ai.request.messages':
163164
'[{"role":"system","content":"You are a helpful assistant."},{"role":"user","content":"What is the capital of France?"}]',
164165
'gen_ai.response.model': 'gpt-3.5-turbo',
@@ -214,6 +215,7 @@ describe('OpenAI integration (V6)', () => {
214215
'sentry.origin': 'auto.ai.openai',
215216
'gen_ai.system': 'openai',
216217
'gen_ai.request.model': 'error-model',
218+
'gen_ai.request.messages.original_length': 1,
217219
'gen_ai.request.messages': '[{"role":"user","content":"This will fail"}]',
218220
},
219221
description: 'chat error-model',
@@ -231,6 +233,7 @@ describe('OpenAI integration (V6)', () => {
231233
'gen_ai.request.model': 'gpt-4',
232234
'gen_ai.request.temperature': 0.8,
233235
'gen_ai.request.stream': true,
236+
'gen_ai.request.messages.original_length': 2,
234237
'gen_ai.request.messages':
235238
'[{"role":"system","content":"You are a helpful assistant."},{"role":"user","content":"Tell me about streaming"}]',
236239
'gen_ai.response.text': 'Hello from OpenAI streaming!',
@@ -287,6 +290,7 @@ describe('OpenAI integration (V6)', () => {
287290
'gen_ai.operation.name': 'chat',
288291
'gen_ai.request.model': 'error-model',
289292
'gen_ai.request.stream': true,
293+
'gen_ai.request.messages.original_length': 1,
290294
'gen_ai.request.messages': '[{"role":"user","content":"This will fail"}]',
291295
'gen_ai.system': 'openai',
292296
'sentry.op': 'gen_ai.chat',
@@ -306,13 +310,15 @@ describe('OpenAI integration (V6)', () => {
306310
// Check that custom options are respected
307311
expect.objectContaining({
308312
data: expect.objectContaining({
313+
'gen_ai.request.messages.original_length': expect.any(Number),
309314
'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true
310315
'gen_ai.response.text': expect.any(String), // Should include response text when recordOutputs: true
311316
}),
312317
}),
313318
// Check that custom options are respected for streaming
314319
expect.objectContaining({
315320
data: expect.objectContaining({
321+
'gen_ai.request.messages.original_length': expect.any(Number),
316322
'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true
317323
'gen_ai.response.text': expect.any(String), // Should include response text when recordOutputs: true
318324
'gen_ai.request.stream': true, // Should be marked as stream

dev-packages/node-integration-tests/suites/tracing/vercelai/test.ts

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -67,6 +67,7 @@ describe('Vercel AI integration', () => {
6767
expect.objectContaining({
6868
data: {
6969
'gen_ai.prompt': '{"prompt":"Where is the second span?"}',
70+
'gen_ai.request.messages.original_length': 1,
7071
'gen_ai.request.messages': '[{"role":"user","content":"Where is the second span?"}]',
7172
'gen_ai.request.model': 'mock-model-id',
7273
'gen_ai.response.model': 'mock-model-id',
@@ -95,6 +96,7 @@ describe('Vercel AI integration', () => {
9596
expect.objectContaining({
9697
data: {
9798
'gen_ai.request.messages': expect.any(String),
99+
'gen_ai.request.messages.original_length': expect.any(Number),
98100
'gen_ai.request.model': 'mock-model-id',
99101
'gen_ai.response.finish_reasons': ['stop'],
100102
'gen_ai.response.id': expect.any(String),
@@ -205,6 +207,7 @@ describe('Vercel AI integration', () => {
205207
expect.objectContaining({
206208
data: {
207209
'gen_ai.prompt': '{"prompt":"Where is the first span?"}',
210+
'gen_ai.request.messages.original_length': 1,
208211
'gen_ai.request.messages': '[{"role":"user","content":"Where is the first span?"}]',
209212
'gen_ai.request.model': 'mock-model-id',
210213
'gen_ai.response.model': 'mock-model-id',
@@ -237,6 +240,7 @@ describe('Vercel AI integration', () => {
237240
// Second span - doGenerate for first call, should also include input/output fields when sendDefaultPii: true
238241
expect.objectContaining({
239242
data: {
243+
'gen_ai.request.messages.original_length': 1,
240244
'gen_ai.request.messages': '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]',
241245
'gen_ai.request.model': 'mock-model-id',
242246
'gen_ai.response.finish_reasons': ['stop'],
@@ -275,6 +279,7 @@ describe('Vercel AI integration', () => {
275279
expect.objectContaining({
276280
data: {
277281
'gen_ai.prompt': '{"prompt":"Where is the second span?"}',
282+
'gen_ai.request.messages.original_length': 1,
278283
'gen_ai.request.messages': '[{"role":"user","content":"Where is the second span?"}]',
279284
'gen_ai.request.model': 'mock-model-id',
280285
'gen_ai.response.model': 'mock-model-id',
@@ -308,6 +313,7 @@ describe('Vercel AI integration', () => {
308313
expect.objectContaining({
309314
data: {
310315
'gen_ai.request.messages': expect.any(String),
316+
'gen_ai.request.messages.original_length': expect.any(Number),
311317
'gen_ai.request.model': 'mock-model-id',
312318
'gen_ai.response.finish_reasons': ['stop'],
313319
'gen_ai.response.id': expect.any(String),
@@ -345,6 +351,7 @@ describe('Vercel AI integration', () => {
345351
expect.objectContaining({
346352
data: {
347353
'gen_ai.prompt': '{"prompt":"What is the weather in San Francisco?"}',
354+
'gen_ai.request.messages.original_length': 1,
348355
'gen_ai.request.messages': '[{"role":"user","content":"What is the weather in San Francisco?"}]',
349356
'gen_ai.request.model': 'mock-model-id',
350357
'gen_ai.response.model': 'mock-model-id',
@@ -380,6 +387,7 @@ describe('Vercel AI integration', () => {
380387
data: {
381388
'gen_ai.request.available_tools': EXPECTED_AVAILABLE_TOOLS_JSON,
382389
'gen_ai.request.messages': expect.any(String),
390+
'gen_ai.request.messages.original_length': expect.any(Number),
383391
'gen_ai.request.model': 'mock-model-id',
384392
'gen_ai.response.finish_reasons': ['tool-calls'],
385393
'gen_ai.response.id': expect.any(String),

dev-packages/node-integration-tests/suites/tracing/vercelai/v5/test.ts

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -75,6 +75,7 @@ describe('Vercel AI integration (V5)', () => {
7575
'vercel.ai.settings.maxRetries': 2,
7676
'vercel.ai.streaming': false,
7777
'gen_ai.prompt': '{"prompt":"Where is the second span?"}',
78+
'gen_ai.request.messages.original_length': 1,
7879
'gen_ai.request.messages': '[{"role":"user","content":"Where is the second span?"}]',
7980
'gen_ai.response.model': 'mock-model-id',
8081
'gen_ai.usage.input_tokens': 10,
@@ -107,6 +108,7 @@ describe('Vercel AI integration (V5)', () => {
107108
'vercel.ai.response.id': expect.any(String),
108109
'gen_ai.response.text': expect.any(String),
109110
'vercel.ai.response.timestamp': expect.any(String),
111+
'gen_ai.request.messages.original_length': expect.any(Number),
110112
'gen_ai.request.messages': expect.any(String),
111113
'gen_ai.response.finish_reasons': ['stop'],
112114
'gen_ai.usage.input_tokens': 10,
@@ -205,6 +207,7 @@ describe('Vercel AI integration (V5)', () => {
205207
'vercel.ai.operationId': 'ai.generateText',
206208
'vercel.ai.pipeline.name': 'generateText',
207209
'vercel.ai.prompt': '{"prompt":"Where is the first span?"}',
210+
'gen_ai.request.messages.original_length': 1,
208211
'gen_ai.request.messages': '[{"role":"user","content":"Where is the first span?"}]',
209212
'vercel.ai.response.finishReason': 'stop',
210213
'gen_ai.response.text': 'First span here!',
@@ -231,6 +234,7 @@ describe('Vercel AI integration (V5)', () => {
231234
'vercel.ai.model.provider': 'mock-provider',
232235
'vercel.ai.operationId': 'ai.generateText.doGenerate',
233236
'vercel.ai.pipeline.name': 'generateText.doGenerate',
237+
'gen_ai.request.messages.original_length': 1,
234238
'gen_ai.request.messages': '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]',
235239
'vercel.ai.response.finishReason': 'stop',
236240
'vercel.ai.response.id': expect.any(String),
@@ -263,6 +267,7 @@ describe('Vercel AI integration (V5)', () => {
263267
'vercel.ai.operationId': 'ai.generateText',
264268
'vercel.ai.pipeline.name': 'generateText',
265269
'vercel.ai.prompt': '{"prompt":"Where is the second span?"}',
270+
'gen_ai.request.messages.original_length': 1,
266271
'gen_ai.request.messages': '[{"role":"user","content":"Where is the second span?"}]',
267272
'vercel.ai.response.finishReason': 'stop',
268273
'gen_ai.response.text': expect.any(String),
@@ -300,6 +305,7 @@ describe('Vercel AI integration (V5)', () => {
300305
'vercel.ai.response.id': expect.any(String),
301306
'gen_ai.response.text': expect.any(String),
302307
'vercel.ai.response.timestamp': expect.any(String),
308+
'gen_ai.request.messages.original_length': expect.any(Number),
303309
'gen_ai.request.messages': expect.any(String),
304310
'gen_ai.response.finish_reasons': ['stop'],
305311
'gen_ai.usage.input_tokens': 10,
@@ -321,6 +327,7 @@ describe('Vercel AI integration (V5)', () => {
321327
'vercel.ai.operationId': 'ai.generateText',
322328
'vercel.ai.pipeline.name': 'generateText',
323329
'vercel.ai.prompt': '{"prompt":"What is the weather in San Francisco?"}',
330+
'gen_ai.request.messages.original_length': 1,
324331
'gen_ai.request.messages': '[{"role":"user","content":"What is the weather in San Francisco?"}]',
325332
'vercel.ai.response.finishReason': 'tool-calls',
326333
'gen_ai.response.tool_calls': expect.any(String),
@@ -347,6 +354,7 @@ describe('Vercel AI integration (V5)', () => {
347354
'vercel.ai.model.provider': 'mock-provider',
348355
'vercel.ai.operationId': 'ai.generateText.doGenerate',
349356
'vercel.ai.pipeline.name': 'generateText.doGenerate',
357+
'gen_ai.request.messages.original_length': expect.any(Number),
350358
'gen_ai.request.messages': expect.any(String),
351359
'vercel.ai.prompt.toolChoice': expect.any(String),
352360
'gen_ai.request.available_tools': EXPECTED_AVAILABLE_TOOLS_JSON,

packages/core/src/tracing/ai/gen-ai-attributes.ts

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -115,6 +115,11 @@ export const GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE = 'gen_ai.usage.total_tokens';
115115
*/
116116
export const GEN_AI_OPERATION_NAME_ATTRIBUTE = 'gen_ai.operation.name';
117117

118+
/**
119+
* Original length of the messages array, used to indicate that truncation has occurred
120+
*/
121+
export const GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE = 'gen_ai.request.messages.original_length';
122+
118123
/**
119124
* The prompt messages
120125
* Only recorded when recordInputs is enabled

packages/core/src/tracing/anthropic-ai/index.ts

Lines changed: 4 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,6 @@ import {
1212
GEN_AI_REQUEST_AVAILABLE_TOOLS_ATTRIBUTE,
1313
GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTRIBUTE,
1414
GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE,
15-
GEN_AI_REQUEST_MESSAGES_ATTRIBUTE,
1615
GEN_AI_REQUEST_MODEL_ATTRIBUTE,
1716
GEN_AI_REQUEST_STREAM_ATTRIBUTE,
1817
GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE,
@@ -24,13 +23,7 @@ import {
2423
GEN_AI_RESPONSE_TOOL_CALLS_ATTRIBUTE,
2524
GEN_AI_SYSTEM_ATTRIBUTE,
2625
} from '../ai/gen-ai-attributes';
27-
import {
28-
buildMethodPath,
29-
getFinalOperationName,
30-
getSpanOperation,
31-
getTruncatedJsonString,
32-
setTokenUsageAttributes,
33-
} from '../ai/utils';
26+
import { buildMethodPath, getFinalOperationName, getSpanOperation, setTokenUsageAttributes } from '../ai/utils';
3427
import { instrumentAsyncIterableStream, instrumentMessageStream } from './streaming';
3528
import type {
3629
AnthropicAiInstrumentedMethod,
@@ -39,7 +32,7 @@ import type {
3932
AnthropicAiStreamingEvent,
4033
ContentBlock,
4134
} from './types';
42-
import { handleResponseError, messagesFromParams, shouldInstrument } from './utils';
35+
import { handleResponseError, messagesFromParams, setMessagesAttribute, shouldInstrument } from './utils';
4336

4437
/**
4538
* Extract request attributes from method arguments
@@ -82,15 +75,10 @@ function extractRequestAttributes(args: unknown[], methodPath: string): Record<s
8275
* This is only recorded if recordInputs is true.
8376
*/
8477
function addPrivateRequestAttributes(span: Span, params: Record<string, unknown>): void {
85-
const messages = messagesFromParams(params);
86-
if (messages.length) {
87-
const truncatedMessages = getTruncatedJsonString(messages);
88-
span.setAttributes({ [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: truncatedMessages });
89-
}
78+
setMessagesAttribute(span, messagesFromParams(params));
9079

9180
if ('input' in params) {
92-
const truncatedInput = getTruncatedJsonString(params.input);
93-
span.setAttributes({ [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: truncatedInput });
81+
setMessagesAttribute(span, params.input);
9482
}
9583

9684
if ('prompt' in params) {

packages/core/src/tracing/anthropic-ai/utils.ts

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,11 @@
11
import { captureException } from '../../exports';
22
import { SPAN_STATUS_ERROR } from '../../tracing';
33
import type { Span } from '../../types-hoist/span';
4+
import {
5+
GEN_AI_REQUEST_MESSAGES_ATTRIBUTE,
6+
GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE,
7+
} from '../ai/gen-ai-attributes';
8+
import { getTruncatedJsonString } from '../ai/utils';
49
import { ANTHROPIC_AI_INSTRUMENTED_METHODS } from './constants';
510
import type { AnthropicAiInstrumentedMethod, AnthropicAiResponse } from './types';
611

@@ -11,6 +16,23 @@ export function shouldInstrument(methodPath: string): methodPath is AnthropicAiI
1116
return ANTHROPIC_AI_INSTRUMENTED_METHODS.includes(methodPath as AnthropicAiInstrumentedMethod);
1217
}
1318

19+
/**
20+
* Set the messages and messages original length attributes.
21+
*/
22+
export function setMessagesAttribute(span: Span, messages: unknown): void {
23+
if (Array.isArray(messages)) {
24+
if (!messages.length) return;
25+
span.setAttributes({
26+
[GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: getTruncatedJsonString(messages),
27+
[GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: messages.length,
28+
});
29+
} else {
30+
span.setAttributes({
31+
[GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: getTruncatedJsonString(messages),
32+
});
33+
}
34+
}
35+
1436
/**
1537
* Capture error information from the response
1638
* @see https://docs.anthropic.com/en/api/errors#error-shapes

packages/core/src/tracing/google-genai/index.ts

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -11,6 +11,7 @@ import {
1111
GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTRIBUTE,
1212
GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE,
1313
GEN_AI_REQUEST_MESSAGES_ATTRIBUTE,
14+
GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE,
1415
GEN_AI_REQUEST_MODEL_ATTRIBUTE,
1516
GEN_AI_REQUEST_PRESENCE_PENALTY_ATTRIBUTE,
1617
GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE,
@@ -167,6 +168,7 @@ function addPrivateRequestAttributes(span: Span, params: Record<string, unknown>
167168

168169
if (messages.length) {
169170
span.setAttributes({
171+
[GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE]: messages.length,
170172
[GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: JSON.stringify(truncateGenAiMessages(messages)),
171173
});
172174
}

packages/core/src/tracing/langchain/utils.ts

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@ import {
55
GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTRIBUTE,
66
GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE,
77
GEN_AI_REQUEST_MESSAGES_ATTRIBUTE,
8+
GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE,
89
GEN_AI_REQUEST_MODEL_ATTRIBUTE,
910
GEN_AI_REQUEST_PRESENCE_PENALTY_ATTRIBUTE,
1011
GEN_AI_REQUEST_STREAM_ATTRIBUTE,
@@ -253,6 +254,7 @@ export function extractLLMRequestAttributes(
253254
const attrs = baseRequestAttributes(system, modelName, 'pipeline', llm, invocationParams, langSmithMetadata);
254255

255256
if (recordInputs && Array.isArray(prompts) && prompts.length > 0) {
257+
setIfDefined(attrs, GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, prompts.length);
256258
const messages = prompts.map(p => ({ role: 'user', content: p }));
257259
setIfDefined(attrs, GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, asString(messages));
258260
}
@@ -282,6 +284,7 @@ export function extractChatModelRequestAttributes(
282284

283285
if (recordInputs && Array.isArray(langChainMessages) && langChainMessages.length > 0) {
284286
const normalized = normalizeLangChainMessages(langChainMessages.flat());
287+
setIfDefined(attrs, GEN_AI_REQUEST_MESSAGES_ORIGINAL_LENGTH_ATTRIBUTE, normalized.length);
285288
const truncated = truncateGenAiMessages(normalized);
286289
setIfDefined(attrs, GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, asString(truncated));
287290
}

0 commit comments

Comments (0)