Skip to content

Commit 7aaece3

Browse files
committed
feat: Add instrumentation for Google GenAI
1 parent 2cde2a4 commit 7aaece3

File tree

16 files changed

+1211
-0
lines changed

16 files changed

+1211
-0
lines changed
Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
1+
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

// recordInputs/recordOutputs are enabled explicitly on the integration while
// sendDefaultPii stays false, so recording is driven by the integration
// options alone in this scenario.
const googleGenAI = Sentry.googleGenAIIntegration({
  recordInputs: true,
  recordOutputs: true,
});

Sentry.init({
  dsn: 'https://public@dsn.ingest.sentry.io/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: false,
  transport: loggingTransport,
  registerEsmLoaderHooks: false,
  integrations: [googleGenAI],
});
Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,12 @@
1+
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

// Google GenAI integration with default options, but with sendDefaultPii
// turned on at the SDK level.
const sentryOptions = {
  dsn: 'https://public@dsn.ingest.sentry.io/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: true,
  transport: loggingTransport,
  registerEsmLoaderHooks: false,
  integrations: [Sentry.googleGenAIIntegration()],
};

Sentry.init(sentryOptions);
Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,12 @@
1+
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

// Baseline setup: Google GenAI integration with default options and
// sendDefaultPii disabled.
const sentryOptions = {
  dsn: 'https://public@dsn.ingest.sentry.io/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: false,
  transport: loggingTransport,
  registerEsmLoaderHooks: false,
  integrations: [Sentry.googleGenAIIntegration()],
};

Sentry.init(sentryOptions);
Lines changed: 139 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,139 @@
1+
import { instrumentGoogleGenAIClient } from '@sentry/core';
2+
import * as Sentry from '@sentry/node';
3+
4+
/**
 * Minimal stand-in for the `@google/genai` client, implementing only the
 * surface this scenario exercises: `models.generateContent` and
 * `chats.create(...).sendMessage(...)`.
 *
 * Requesting the special model name 'error-model' via generateContent
 * rejects with a 404-flavored Error to drive the error-handling path.
 */
class MockGoogleGenAI {
  constructor(config) {
    this.apiKey = config.apiKey;

    this.models = {
      generateContent: async params => {
        // Simulate processing time.
        await new Promise(resolve => setTimeout(resolve, 10));

        if (params.model === 'error-model') {
          const notFound = new Error('Model not found');
          notFound.status = 404;
          throw notFound;
        }

        // Canned answer when contents are provided, generic reply otherwise.
        const text = params.contents
          ? 'The capital of France is Paris.'
          : 'Mock response from Google GenAI!';

        return {
          candidates: [
            {
              content: {
                parts: [{ text }],
                role: 'model',
              },
              finishReason: 'stop',
              index: 0,
            },
          ],
          usageMetadata: {
            promptTokenCount: 8,
            candidatesTokenCount: 12,
            totalTokenCount: 20,
          },
        };
      },
    };

    this.chats = {
      // Returns a chat handle whose sendMessage always yields the canned reply.
      create: () => ({
        sendMessage: async () => {
          // Simulate processing time.
          await new Promise(resolve => setTimeout(resolve, 10));

          return {
            candidates: [
              {
                content: {
                  parts: [{ text: 'Mock response from Google GenAI!' }],
                  role: 'model',
                },
                finishReason: 'stop',
                index: 0,
              },
            ],
            usageMetadata: {
              promptTokenCount: 10,
              candidatesTokenCount: 15,
              totalTokenCount: 25,
            },
          };
        },
      }),
    };
  }
}
80+
81+
/**
 * Exercises the instrumented mock client end-to-end inside a root `main`
 * span: a chats.create + sendMessage flow, a direct models.generateContent
 * call, and a generateContent call that is expected to reject.
 */
async function run() {
  const genAI = new MockGoogleGenAI({ apiKey: 'test-api-key' });
  const instrumentedClient = instrumentGoogleGenAIClient(genAI);

  await Sentry.startSpan({ name: 'main', op: 'function' }, async () => {
    // Test 1: chats.create and sendMessage flow
    const chat = instrumentedClient.chats.create({
      model: 'gemini-1.5-pro',
      config: { temperature: 0.8, topP: 0.9, maxOutputTokens: 150 },
      history: [{ role: 'user', parts: [{ text: 'Hello, how are you?' }] }],
    });

    await chat.sendMessage({ message: 'Tell me a joke' });

    // Test 2: models.generateContent
    await instrumentedClient.models.generateContent({
      model: 'gemini-1.5-flash',
      config: { temperature: 0.7, topP: 0.9, maxOutputTokens: 100 },
      contents: [{ role: 'user', parts: [{ text: 'What is the capital of France?' }] }],
    });

    // Test 3: error handling — 'error-model' makes the mock reject; the
    // rejection is expected and intentionally swallowed.
    try {
      await instrumentedClient.models.generateContent({
        model: 'error-model',
        contents: [{ role: 'user', parts: [{ text: 'This will fail' }] }],
      });
    } catch {
      // Expected error
    }
  });
}

run();
Lines changed: 205 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,205 @@
1+
import { afterAll, describe, expect } from 'vitest';
2+
import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../utils/runner';
3+
4+
// Integration tests for the Google GenAI instrumentation. Each case runs
// scenario.mjs under a different instrument file (via createEsmAndCjsTests)
// and asserts on the gen_ai.* attributes of the resulting `main` transaction.
describe('Google GenAI integration', () => {
  afterAll(() => {
    cleanupChildProcesses();
  });

  // With sendDefaultPii: false, spans carry only metadata (model, request
  // parameters, token usage) — no prompt or response content. Note the exact
  // `data` object match here (no objectContaining), which also asserts the
  // ABSENCE of message/response attributes.
  const EXPECTED_TRANSACTION_DEFAULT_PII_FALSE = {
    transaction: 'main',
    spans: expect.arrayContaining([
      // First span - chats.create
      expect.objectContaining({
        data: {
          'gen_ai.operation.name': 'chat',
          'sentry.op': 'gen_ai.chat',
          'sentry.origin': 'auto.ai.google_genai',
          'gen_ai.system': 'google_genai',
          'gen_ai.request.model': 'gemini-1.5-pro',
          'gen_ai.request.temperature': 0.8,
          'gen_ai.request.top_p': 0.9,
          'gen_ai.request.max_tokens': 150,
        },
        description: 'chat gemini-1.5-pro create',
        op: 'gen_ai.chat',
        origin: 'auto.ai.google_genai',
        status: 'ok',
      }),
      // Second span - chat.sendMessage (should get model from context)
      expect.objectContaining({
        data: {
          'gen_ai.operation.name': 'chat',
          'sentry.op': 'gen_ai.chat',
          'sentry.origin': 'auto.ai.google_genai',
          'gen_ai.system': 'google_genai',
          'gen_ai.request.model': 'gemini-1.5-pro', // Should get from chat context
          'gen_ai.usage.input_tokens': 10,
          'gen_ai.usage.output_tokens': 15,
          'gen_ai.usage.total_tokens': 25,
        },
        description: 'chat gemini-1.5-pro',
        op: 'gen_ai.chat',
        origin: 'auto.ai.google_genai',
        status: 'ok',
      }),
      // Third span - models.generateContent
      expect.objectContaining({
        data: {
          'gen_ai.operation.name': 'models',
          'sentry.op': 'gen_ai.models',
          'sentry.origin': 'auto.ai.google_genai',
          'gen_ai.system': 'google_genai',
          'gen_ai.request.model': 'gemini-1.5-flash',
          'gen_ai.request.temperature': 0.7,
          'gen_ai.request.top_p': 0.9,
          'gen_ai.request.max_tokens': 100,
          'gen_ai.usage.input_tokens': 8,
          'gen_ai.usage.output_tokens': 12,
          'gen_ai.usage.total_tokens': 20,
        },
        description: 'models gemini-1.5-flash',
        op: 'gen_ai.models',
        origin: 'auto.ai.google_genai',
        status: 'ok',
      }),
      // Fourth span - error handling
      expect.objectContaining({
        data: {
          'gen_ai.operation.name': 'models',
          'sentry.op': 'gen_ai.models',
          'sentry.origin': 'auto.ai.google_genai',
          'gen_ai.system': 'google_genai',
          'gen_ai.request.model': 'error-model',
        },
        description: 'models error-model',
        op: 'gen_ai.models',
        origin: 'auto.ai.google_genai',
        status: 'unknown_error',
      }),
    ]),
  };

  // With sendDefaultPii: true, the same four spans additionally record
  // request messages and (where a response exists) response text; `data`
  // uses objectContaining since only a subset of attributes is pinned.
  const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE = {
    transaction: 'main',
    spans: expect.arrayContaining([
      // First span - chats.create with PII
      expect.objectContaining({
        data: expect.objectContaining({
          'gen_ai.operation.name': 'chat',
          'sentry.op': 'gen_ai.chat',
          'sentry.origin': 'auto.ai.google_genai',
          'gen_ai.system': 'google_genai',
          'gen_ai.request.model': 'gemini-1.5-pro',
          'gen_ai.request.temperature': 0.8,
          'gen_ai.request.top_p': 0.9,
          'gen_ai.request.max_tokens': 150,
          'gen_ai.request.messages': expect.any(String), // Should include history when recordInputs: true
        }),
        description: 'chat gemini-1.5-pro create',
        op: 'gen_ai.chat',
        origin: 'auto.ai.google_genai',
        status: 'ok',
      }),
      // Second span - chat.sendMessage with PII
      expect.objectContaining({
        data: expect.objectContaining({
          'gen_ai.operation.name': 'chat',
          'sentry.op': 'gen_ai.chat',
          'sentry.origin': 'auto.ai.google_genai',
          'gen_ai.system': 'google_genai',
          'gen_ai.request.model': 'gemini-1.5-pro',
          'gen_ai.request.messages': expect.any(String), // Should include message when recordInputs: true
          'gen_ai.response.text': expect.any(String), // Should include response when recordOutputs: true
          'gen_ai.usage.input_tokens': 10,
          'gen_ai.usage.output_tokens': 15,
          'gen_ai.usage.total_tokens': 25,
        }),
        description: 'chat gemini-1.5-pro',
        op: 'gen_ai.chat',
        origin: 'auto.ai.google_genai',
        status: 'ok',
      }),
      // Third span - models.generateContent with PII
      expect.objectContaining({
        data: expect.objectContaining({
          'gen_ai.operation.name': 'models',
          'sentry.op': 'gen_ai.models',
          'sentry.origin': 'auto.ai.google_genai',
          'gen_ai.system': 'google_genai',
          'gen_ai.request.model': 'gemini-1.5-flash',
          'gen_ai.request.temperature': 0.7,
          'gen_ai.request.top_p': 0.9,
          'gen_ai.request.max_tokens': 100,
          'gen_ai.request.messages': expect.any(String), // Should include contents when recordInputs: true
          'gen_ai.response.text': expect.any(String), // Should include response when recordOutputs: true
          'gen_ai.usage.input_tokens': 8,
          'gen_ai.usage.output_tokens': 12,
          'gen_ai.usage.total_tokens': 20,
        }),
        description: 'models gemini-1.5-flash',
        op: 'gen_ai.models',
        origin: 'auto.ai.google_genai',
        status: 'ok',
      }),
      // Fourth span - error handling with PII
      expect.objectContaining({
        data: expect.objectContaining({
          'gen_ai.operation.name': 'models',
          'sentry.op': 'gen_ai.models',
          'sentry.origin': 'auto.ai.google_genai',
          'gen_ai.system': 'google_genai',
          'gen_ai.request.model': 'error-model',
          'gen_ai.request.messages': expect.any(String), // Should include contents when recordInputs: true
        }),
        description: 'models error-model',
        op: 'gen_ai.models',
        origin: 'auto.ai.google_genai',
        status: 'unknown_error',
      }),
    ]),
  };

  // With sendDefaultPii: false but explicit recordInputs/recordOutputs: true
  // on the integration, at least one span must still record inputs/outputs.
  const EXPECTED_TRANSACTION_WITH_OPTIONS = {
    transaction: 'main',
    spans: expect.arrayContaining([
      // Check that custom options are respected
      expect.objectContaining({
        data: expect.objectContaining({
          'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true
          'gen_ai.response.text': expect.any(String), // Should include response text when recordOutputs: true
        }),
      }),
    ]),
  };

  // Baseline: default integration options, sendDefaultPii: false.
  createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument.mjs', (createRunner, test) => {
    test('creates google genai related spans with sendDefaultPii: false', async () => {
      await createRunner()
        .ignore('event')
        .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_FALSE })
        .start()
        .completed();
    });
  });

  // Default integration options, sendDefaultPii: true.
  createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument-with-pii.mjs', (createRunner, test) => {
    test('creates google genai related spans with sendDefaultPii: true', async () => {
      await createRunner()
        .ignore('event')
        .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_TRUE })
        .start()
        .completed();
    });
  });

  // Explicit recordInputs/recordOutputs options override sendDefaultPii: false.
  createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument-with-options.mjs', (createRunner, test) => {
    test('creates google genai related spans with custom options', async () => {
      await createRunner()
        .ignore('event')
        .expect({ transaction: EXPECTED_TRANSACTION_WITH_OPTIONS })
        .start()
        .completed();
    });
  });
});

0 commit comments

Comments
 (0)