Skip to content

Commit 5e7cd06

Browse files
authored
feat(node): Add tracing support for AzureOpenAI (#18281)
This pull request adds support for the Azure OpenAI client, in addition to the existing support for the vanilla OpenAI client. Fixes issue #18280.
1 parent cbecbdf commit 5e7cd06

File tree

5 files changed

+251
-4
lines changed

5 files changed

+251
-4
lines changed
Lines changed: 64 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,64 @@
1+
import express from 'express';
2+
import { AzureOpenAI } from 'openai';
3+
4+
/**
 * Starts an Express server that mimics the Azure OpenAI chat-completions
 * endpoint and always replies with a fixed mock completion.
 *
 * @returns {Promise<import('http').Server>} Resolves with the HTTP server
 *   once it is listening on an OS-assigned ephemeral port.
 */
function startMockOpenAiServer() {
  const mockApp = express();
  mockApp.use(express.json());

  // Azure routes chat completions under /deployments/<deployment>/chat/completions.
  mockApp.post('/azureopenai/deployments/:model/chat/completions', (request, response) => {
    const mockCompletion = {
      id: 'chatcmpl-mock123',
      object: 'chat.completion',
      created: 1677652288,
      // Echo back whatever model the caller requested.
      model: request.body.model,
      system_fingerprint: 'fp_44709d6fcb',
      choices: [
        {
          index: 0,
          message: {
            role: 'assistant',
            content: 'Hello from OpenAI mock!',
          },
          finish_reason: 'stop',
        },
      ],
      usage: {
        prompt_tokens: 10,
        completion_tokens: 15,
        total_tokens: 25,
      },
    };
    response.send(mockCompletion);
  });

  // Port 0 asks the OS for any free port; resolve once the server is ready.
  return new Promise(resolve => {
    const httpServer = mockApp.listen(0, () => resolve(httpServer));
  });
}
38+
39+
/**
 * Exercises the instrumented AzureOpenAI client against the local mock
 * server, logs the raw completion response, and shuts the server down.
 *
 * Fix over the original: the mock server is now closed in a `finally`
 * block, so a failing request no longer leaks the listening socket and
 * keeps the test process alive.
 */
async function run() {
  const server = await startMockOpenAiServer();

  try {
    const client = new AzureOpenAI({
      // Point the Azure client at the mock server's ephemeral port.
      baseURL: `http://localhost:${server.address().port}/azureopenai`,
      apiKey: 'mock-api-key',
      apiVersion: '2024-02-15-preview',
    });

    const response = await client.chat.completions.create({
      model: 'gpt-3.5-turbo',
      messages: [
        { role: 'system', content: 'You are a helpful assistant.' },
        { role: 'user', content: 'What is the capital of France?' },
      ],
      temperature: 0.7,
      max_tokens: 100,
    });

    // eslint-disable-next-line no-console
    console.log(JSON.stringify(response));
  } finally {
    // Always release the port so the process can exit cleanly.
    server.close();
  }
}

run();

dev-packages/node-integration-tests/suites/tracing/openai/test.ts

Lines changed: 47 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -501,6 +501,53 @@ describe('OpenAI integration', () => {
501501
});
502502
});
503503

504+
createEsmAndCjsTests(__dirname, 'scenario-azure-openai.mjs', 'instrument.mjs', (createRunner, test) => {
505+
test('it works with Azure OpenAI', async () => {
506+
await createRunner()
507+
// First the span that our mock express server is emitting, unrelated to this test
508+
.expect({
509+
transaction: {
510+
transaction: 'POST /azureopenai/deployments/:model/chat/completions',
511+
},
512+
})
513+
.expect({
514+
transaction: {
515+
transaction: 'chat gpt-3.5-turbo',
516+
contexts: {
517+
trace: {
518+
span_id: expect.any(String),
519+
trace_id: expect.any(String),
520+
data: {
521+
'gen_ai.operation.name': 'chat',
522+
'sentry.op': 'gen_ai.chat',
523+
'sentry.origin': 'auto.ai.openai',
524+
'gen_ai.system': 'openai',
525+
'gen_ai.request.model': 'gpt-3.5-turbo',
526+
'gen_ai.request.temperature': 0.7,
527+
'gen_ai.response.model': 'gpt-3.5-turbo',
528+
'gen_ai.response.id': 'chatcmpl-mock123',
529+
'gen_ai.response.finish_reasons': '["stop"]',
530+
'gen_ai.usage.input_tokens': 10,
531+
'gen_ai.usage.output_tokens': 15,
532+
'gen_ai.usage.total_tokens': 25,
533+
'openai.response.id': 'chatcmpl-mock123',
534+
'openai.response.model': 'gpt-3.5-turbo',
535+
'openai.response.timestamp': '2023-03-01T06:31:28.000Z',
536+
'openai.usage.completion_tokens': 15,
537+
'openai.usage.prompt_tokens': 10,
538+
},
539+
op: 'gen_ai.chat',
540+
origin: 'auto.ai.openai',
541+
status: 'ok',
542+
},
543+
},
544+
},
545+
})
546+
.start()
547+
.completed();
548+
});
549+
});
550+
504551
createEsmAndCjsTests(
505552
__dirname,
506553
'truncation/scenario-message-truncation-completions.mjs',
Lines changed: 64 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,64 @@
1+
import express from 'express';
2+
import { AzureOpenAI } from 'openai';
3+
4+
/**
 * Starts an Express server that mimics the Azure OpenAI chat-completions
 * endpoint and always replies with a fixed mock completion.
 *
 * @returns {Promise<import('http').Server>} Resolves with the HTTP server
 *   once it is listening on an OS-assigned ephemeral port.
 */
function startMockOpenAiServer() {
  const mockApp = express();
  mockApp.use(express.json());

  // Azure routes chat completions under /deployments/<deployment>/chat/completions.
  mockApp.post('/azureopenai/deployments/:model/chat/completions', (request, response) => {
    const mockCompletion = {
      id: 'chatcmpl-mock123',
      object: 'chat.completion',
      created: 1677652288,
      // Echo back whatever model the caller requested.
      model: request.body.model,
      system_fingerprint: 'fp_44709d6fcb',
      choices: [
        {
          index: 0,
          message: {
            role: 'assistant',
            content: 'Hello from OpenAI mock!',
          },
          finish_reason: 'stop',
        },
      ],
      usage: {
        prompt_tokens: 10,
        completion_tokens: 15,
        total_tokens: 25,
      },
    };
    response.send(mockCompletion);
  });

  // Port 0 asks the OS for any free port; resolve once the server is ready.
  return new Promise(resolve => {
    const httpServer = mockApp.listen(0, () => resolve(httpServer));
  });
}
38+
39+
/**
 * Exercises the instrumented AzureOpenAI client against the local mock
 * server, logs the raw completion response, and shuts the server down.
 *
 * Fix over the original: the mock server is now closed in a `finally`
 * block, so a failing request no longer leaks the listening socket and
 * keeps the test process alive.
 */
async function run() {
  const server = await startMockOpenAiServer();

  try {
    const client = new AzureOpenAI({
      // Point the Azure client at the mock server's ephemeral port.
      baseURL: `http://localhost:${server.address().port}/azureopenai`,
      apiKey: 'mock-api-key',
      apiVersion: '2024-02-15-preview',
    });

    const response = await client.chat.completions.create({
      model: 'gpt-3.5-turbo',
      messages: [
        { role: 'system', content: 'You are a helpful assistant.' },
        { role: 'user', content: 'What is the capital of France?' },
      ],
      temperature: 0.7,
      max_tokens: 100,
    });

    // eslint-disable-next-line no-console
    console.log(JSON.stringify(response));
  } finally {
    // Always release the port so the process can exit cleanly.
    server.close();
  }
}

run();

dev-packages/node-integration-tests/suites/tracing/openai/v6/test.ts

Lines changed: 58 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -562,4 +562,62 @@ describe('OpenAI integration (V6)', () => {
562562
},
563563
},
564564
);
565+
566+
createEsmAndCjsTests(
567+
__dirname,
568+
'scenario-azure-openai.mjs',
569+
'instrument.mjs',
570+
(createRunner, test) => {
571+
test('it works with Azure OpenAI (v6)', async () => {
572+
await createRunner()
573+
// First the span that our mock express server is emitting, unrelated to this test
574+
.expect({
575+
transaction: {
576+
transaction: 'POST /azureopenai/deployments/:model/chat/completions',
577+
},
578+
})
579+
.expect({
580+
transaction: {
581+
transaction: 'chat gpt-3.5-turbo',
582+
contexts: {
583+
trace: {
584+
span_id: expect.any(String),
585+
trace_id: expect.any(String),
586+
data: {
587+
'gen_ai.operation.name': 'chat',
588+
'sentry.op': 'gen_ai.chat',
589+
'sentry.origin': 'auto.ai.openai',
590+
'gen_ai.system': 'openai',
591+
'gen_ai.request.model': 'gpt-3.5-turbo',
592+
'gen_ai.request.temperature': 0.7,
593+
'gen_ai.response.model': 'gpt-3.5-turbo',
594+
'gen_ai.response.id': 'chatcmpl-mock123',
595+
'gen_ai.response.finish_reasons': '["stop"]',
596+
'gen_ai.usage.input_tokens': 10,
597+
'gen_ai.usage.output_tokens': 15,
598+
'gen_ai.usage.total_tokens': 25,
599+
'openai.response.id': 'chatcmpl-mock123',
600+
'openai.response.model': 'gpt-3.5-turbo',
601+
'openai.response.timestamp': '2023-03-01T06:31:28.000Z',
602+
'openai.usage.completion_tokens': 15,
603+
'openai.usage.prompt_tokens': 10,
604+
},
605+
op: 'gen_ai.chat',
606+
origin: 'auto.ai.openai',
607+
status: 'ok',
608+
},
609+
},
610+
},
611+
})
612+
.start()
613+
.completed();
614+
});
615+
},
616+
{
617+
additionalDependencies: {
618+
openai: '6.0.0',
619+
express: 'latest',
620+
},
621+
},
622+
);
565623
});

packages/node/src/integrations/tracing/openai/instrumentation.ts

Lines changed: 18 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@ export interface OpenAiIntegration extends Integration {
2525
// Shape of the `openai` module exports that this instrumentation patches.
interface PatchedModuleExports {
  // The SDK exports many other symbols; only the constructors below are patched.
  [key: string]: unknown;
  // Vanilla OpenAI client constructor (always exported by the SDK).
  OpenAI: abstract new (...args: unknown[]) => OpenAiClient;
  // Azure variant — optional because some SDK versions do not export it.
  AzureOpenAI?: abstract new (...args: unknown[]) => OpenAiClient;
}
2930

3031
/**
@@ -56,10 +57,23 @@ export class SentryOpenAiInstrumentation extends InstrumentationBase<Instrumenta
5657
}
5758

5859
/**
 * Core patch logic applying instrumentation to the OpenAI and AzureOpenAI client constructors.
 */
private _patch(exports: PatchedModuleExports): PatchedModuleExports | void {
  // Wrap both exported client constructors in turn; _patchClient leaves the
  // exports untouched when the named export is absent.
  const withOpenAi = this._patchClient(exports, 'OpenAI');
  return this._patchClient(withOpenAi, 'AzureOpenAI');
}
68+
69+
/**
70+
* Patch logic applying instrumentation to the specified client constructor.
71+
*/
72+
private _patchClient(exports: PatchedModuleExports, exportKey: 'OpenAI' | 'AzureOpenAI'): PatchedModuleExports {
73+
const Original = exports[exportKey];
74+
if (!Original) {
75+
return exports;
76+
}
6377

6478
const WrappedOpenAI = function (this: unknown, ...args: unknown[]) {
6579
// Check if wrapping should be skipped (e.g., when LangChain is handling instrumentation)
@@ -97,10 +111,10 @@ export class SentryOpenAiInstrumentation extends InstrumentationBase<Instrumenta
97111
// Constructor replacement - handle read-only properties
98112
// The OpenAI property might have only a getter, so use defineProperty
99113
try {
100-
exports.OpenAI = WrappedOpenAI;
114+
exports[exportKey] = WrappedOpenAI;
101115
} catch (error) {
102116
// If direct assignment fails, override the property descriptor
103-
Object.defineProperty(exports, 'OpenAI', {
117+
Object.defineProperty(exports, exportKey, {
104118
value: WrappedOpenAI,
105119
writable: true,
106120
configurable: true,

0 commit comments

Comments
 (0)