1 change: 1 addition & 0 deletions dev-packages/node-integration-tests/package.json
@@ -37,6 +37,7 @@
"@types/mongodb": "^3.6.20",
"@types/mysql": "^2.15.21",
"@types/pg": "^8.6.5",
"ai": "^4.0.6",
"amqplib": "^0.10.4",
"apollo-server": "^3.11.1",
"axios": "^1.7.7",
@@ -2,6 +2,7 @@ import { loggingTransport } from '@sentry-internal/node-integration-tests';
import * as Sentry from '@sentry/node';

Sentry.init({
  debug: true,
  dsn: 'https://public@dsn.ingest.sentry.io/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
58 changes: 58 additions & 0 deletions dev-packages/node-integration-tests/suites/tracing/ai/scenario.js
@@ -0,0 +1,58 @@
const { loggingTransport } = require('@sentry-internal/node-integration-tests');
const Sentry = require('@sentry/node');

Sentry.init({
  dsn: 'https://public@dsn.ingest.sentry.io/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  transport: loggingTransport,
});

const { generateText } = require('ai');
const { MockLanguageModelV1 } = require('ai/test');
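// `MockLanguageModelV1` from `ai/test` stubs the provider call, so no real LLM request is made.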

async function run() {
  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
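    // No telemetry options are set here: the span is still captured by the
    // integration, but without input/output prompts attached (compare the second call).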
    await generateText({
      model: new MockLanguageModelV1({
        doGenerate: async () => ({
          rawCall: { rawPrompt: null, rawSettings: {} },
          finishReason: 'stop',
          usage: { promptTokens: 10, completionTokens: 20 },
          text: 'First span here!',
        }),
      }),
      prompt: 'Where is the first span?',
    });

    // This span should have input and output prompts attached because telemetry is explicitly enabled.
    await generateText({
      experimental_telemetry: { isEnabled: true },
      model: new MockLanguageModelV1({
        doGenerate: async () => ({
          rawCall: { rawPrompt: null, rawSettings: {} },
          finishReason: 'stop',
          usage: { promptTokens: 10, completionTokens: 20 },
          text: 'Second span here!',
        }),
      }),
      prompt: 'Where is the second span?',
    });

    // This span should not be captured because we've disabled telemetry.
    await generateText({
      experimental_telemetry: { isEnabled: false },
      model: new MockLanguageModelV1({
        doGenerate: async () => ({
          rawCall: { rawPrompt: null, rawSettings: {} },
          finishReason: 'stop',
          usage: { promptTokens: 10, completionTokens: 20 },
          text: 'Third span here!',
        }),
      }),
      prompt: 'Where is the third span?',
    });
  });
}

run();
131 changes: 131 additions & 0 deletions dev-packages/node-integration-tests/suites/tracing/ai/test.ts
@@ -0,0 +1,131 @@
import { conditionalTest } from '../../../utils';
import { cleanupChildProcesses, createRunner } from '../../../utils/runner';

// The `ai` SDK only supports Node 18+.
conditionalTest({ min: 18 })('ai', () => {
  afterAll(() => {
    cleanupChildProcesses();
  });

  test('creates ai related spans', done => {
    const EXPECTED_TRANSACTION = {
      transaction: 'main',
      spans: expect.arrayContaining([
        // Pipeline span for the first call: captured, but without prompt attributes,
        // since telemetry was not explicitly enabled for that call.
        expect.objectContaining({
          data: expect.objectContaining({
            'ai.completion_tokens.used': 20,
            'ai.model.id': 'mock-model-id',
            'ai.model.provider': 'mock-provider',
            'ai.model_id': 'mock-model-id',
            'ai.operationId': 'ai.generateText',
            'ai.pipeline.name': 'generateText',
            'ai.prompt_tokens.used': 10,
            'ai.response.finishReason': 'stop',
            'ai.settings.maxRetries': 2,
            'ai.settings.maxSteps': 1,
            'ai.streaming': false,
            'ai.total_tokens.used': 30,
            'ai.usage.completionTokens': 20,
            'ai.usage.promptTokens': 10,
            'operation.name': 'ai.generateText',
            'sentry.op': 'ai.pipeline.generateText',
            'sentry.origin': 'auto.vercelai.otel',
          }),
          description: 'generateText',
          op: 'ai.pipeline.generateText',
          origin: 'auto.vercelai.otel',
          status: 'ok',
        }),
        // Inner doGenerate span for the first call.
        expect.objectContaining({
          data: expect.objectContaining({
            'sentry.origin': 'auto.vercelai.otel',
            'sentry.op': 'ai.run.doGenerate',
            'operation.name': 'ai.generateText.doGenerate',
            'ai.operationId': 'ai.generateText.doGenerate',
            'ai.model.provider': 'mock-provider',
            'ai.model.id': 'mock-model-id',
            'ai.settings.maxRetries': 2,
            'gen_ai.system': 'mock-provider',
            'gen_ai.request.model': 'mock-model-id',
            'ai.pipeline.name': 'generateText.doGenerate',
            'ai.model_id': 'mock-model-id',
            'ai.streaming': false,
            'ai.response.finishReason': 'stop',
            'ai.response.model': 'mock-model-id',
            'ai.usage.promptTokens': 10,
            'ai.usage.completionTokens': 20,
            'gen_ai.response.finish_reasons': ['stop'],
            'gen_ai.usage.input_tokens': 10,
            'gen_ai.usage.output_tokens': 20,
            'ai.completion_tokens.used': 20,
            'ai.prompt_tokens.used': 10,
            'ai.total_tokens.used': 30,
          }),
          description: 'generateText.doGenerate',
          op: 'ai.run.doGenerate',
          origin: 'auto.vercelai.otel',
          status: 'ok',
        }),
        // Pipeline span for the second call: prompt and input messages are attached
        // because telemetry is explicitly enabled for that call.
        expect.objectContaining({
          data: expect.objectContaining({
            'ai.completion_tokens.used': 20,
            'ai.model.id': 'mock-model-id',
            'ai.model.provider': 'mock-provider',
            'ai.model_id': 'mock-model-id',
            'ai.prompt': '{"prompt":"Where is the second span?"}',
            'ai.operationId': 'ai.generateText',
            'ai.pipeline.name': 'generateText',
            'ai.prompt_tokens.used': 10,
            'ai.response.finishReason': 'stop',
            'ai.input_messages': '{"prompt":"Where is the second span?"}',
            'ai.settings.maxRetries': 2,
            'ai.settings.maxSteps': 1,
            'ai.streaming': false,
            'ai.total_tokens.used': 30,
            'ai.usage.completionTokens': 20,
            'ai.usage.promptTokens': 10,
            'operation.name': 'ai.generateText',
            'sentry.op': 'ai.pipeline.generateText',
            'sentry.origin': 'auto.vercelai.otel',
          }),
          description: 'generateText',
          op: 'ai.pipeline.generateText',
          origin: 'auto.vercelai.otel',
          status: 'ok',
        }),
        // Inner doGenerate span for the second call.
        expect.objectContaining({
          data: expect.objectContaining({
            'sentry.origin': 'auto.vercelai.otel',
            'sentry.op': 'ai.run.doGenerate',
            'operation.name': 'ai.generateText.doGenerate',
            'ai.operationId': 'ai.generateText.doGenerate',
            'ai.model.provider': 'mock-provider',
            'ai.model.id': 'mock-model-id',
            'ai.settings.maxRetries': 2,
            'gen_ai.system': 'mock-provider',
            'gen_ai.request.model': 'mock-model-id',
            'ai.pipeline.name': 'generateText.doGenerate',
            'ai.model_id': 'mock-model-id',
            'ai.streaming': false,
            'ai.response.finishReason': 'stop',
            'ai.response.model': 'mock-model-id',
            'ai.usage.promptTokens': 10,
            'ai.usage.completionTokens': 20,
            'gen_ai.response.finish_reasons': ['stop'],
            'gen_ai.usage.input_tokens': 10,
            'gen_ai.usage.output_tokens': 20,
            'ai.completion_tokens.used': 20,
            'ai.prompt_tokens.used': 10,
            'ai.total_tokens.used': 30,
          }),
          description: 'generateText.doGenerate',
          op: 'ai.run.doGenerate',
          origin: 'auto.vercelai.otel',
          status: 'ok',
        }),
      ]),
    };

    createRunner(__dirname, 'scenario.js').expect({ transaction: EXPECTED_TRANSACTION }).start(done);
  });
});
3 changes: 3 additions & 0 deletions packages/node/src/integrations/tracing/index.ts
@@ -19,6 +19,7 @@ import { instrumentNest, nestIntegration } from './nest/nest';
import { instrumentPostgres, postgresIntegration } from './postgres';
import { instrumentRedis, redisIntegration } from './redis';
import { instrumentTedious, tediousIntegration } from './tedious';
import { instrumentVercelAi, vercelAIIntegration } from './vercelai';

/**
* With OTEL, all performance integrations will be added, as OTEL only initializes them when the patched package is actually required.
@@ -48,6 +49,7 @@ export function getAutoPerformanceIntegrations(): Integration[] {
    kafkaIntegration(),
    amqplibIntegration(),
    lruMemoizerIntegration(),
    vercelAIIntegration(),
  ];
}

@@ -78,5 +80,6 @@ export function getOpenTelemetryInstrumentationToPreload(): (((options?: any) =>
    instrumentTedious,
    instrumentGenericPool,
    instrumentAmqplib,
    instrumentVercelAi,
  ];
}
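
For context, a minimal sketch of how an app could use this instrumentation once it ships — assuming `vercelAIIntegration` is re-exported from `@sentry/node`, which this diff does not show:

const Sentry = require('@sentry/node');

Sentry.init({
  dsn: 'https://public@dsn.ingest.sentry.io/1337',
  tracesSampleRate: 1.0,
  // vercelAIIntegration() is already part of getAutoPerformanceIntegrations(),
  // so listing it explicitly is only needed when overriding the default integrations.
  integrations: [Sentry.vercelAIIntegration()],
});

// Prompt/response recording is a per-call opt-in, as exercised in scenario.js above:
// await generateText({ experimental_telemetry: { isEnabled: true }, model, prompt });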