Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions dev-packages/node-integration-tests/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
},
"dependencies": {
"@aws-sdk/client-s3": "^3.552.0",
"@google/genai": "^1.20.0",
"@hapi/hapi": "^21.3.10",
"@nestjs/common": "11.1.3",
"@nestjs/core": "11.1.3",
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

// Instrumentation for the "custom options" scenario: PII capture is globally
// off (sendDefaultPii: false), but the integration's explicit
// recordInputs/recordOutputs flags should still force message/response capture.
Sentry.init({
  dsn: 'https://public@dsn.ingest.sentry.io/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: false,
  transport: loggingTransport,
  integrations: [
    Sentry.googleGenAIIntegration({
      recordInputs: true,
      recordOutputs: true,
    }),
  ],
  beforeSendTransaction: event => {
    // Filter out transactions created by the mock express server.
    // `transaction` is optional on transaction events, so guard with
    // optional chaining instead of calling `.includes` unconditionally.
    if (event.transaction?.includes('/v1beta/')) {
      return null;
    }
    return event;
  },
});
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

// Instrumentation for the "sendDefaultPii: true" scenario: with PII enabled,
// the Google GenAI integration should capture request messages and response
// text on its spans by default.
Sentry.init({
  dsn: 'https://public@dsn.ingest.sentry.io/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: true,
  transport: loggingTransport,
  beforeSendTransaction: event => {
    // Filter out transactions created by the mock express server.
    // `transaction` is optional on transaction events, so guard with
    // optional chaining instead of calling `.includes` unconditionally.
    if (event.transaction?.includes('/v1beta/')) {
      return null;
    }
    return event;
  },
});
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

// Default instrumentation for the Google GenAI scenario: PII capture is off,
// so spans must NOT contain request messages or response text.
Sentry.init({
  dsn: 'https://public@dsn.ingest.sentry.io/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: false,
  transport: loggingTransport,
  beforeSendTransaction: event => {
    // Filter out transactions created by the mock express server.
    // Use '/v1beta/' (with trailing slash) to match the sibling instrument
    // files, and optional chaining because `transaction` may be undefined.
    if (event.transaction?.includes('/v1beta/')) {
      return null;
    }
    return event;
  },
});
Original file line number Diff line number Diff line change
@@ -0,0 +1,109 @@
import { GoogleGenAI } from '@google/genai';
import * as Sentry from '@sentry/node';
import express from 'express';

const PORT = 3333;

/**
 * Starts a local express app that mimics the Google GenAI
 * `models/{model}:generateContent` endpoint so the scenario never
 * touches the network.
 *
 * Requests for the special model name "error-model" receive a 404 so the
 * scenario can exercise the integration's error path; every other model
 * receives a fixed successful completion with known token counts.
 *
 * @returns the listening http.Server (caller is responsible for closing it)
 */
function startMockGoogleGenAIServer() {
  const app = express();
  app.use(express.json());

  // The backslash escapes the second ':' so express treats ':generateContent'
  // as a literal suffix rather than a second route parameter.
  app.post('/v1beta/models/:model\\:generateContent', (req, res) => {
    const { model } = req.params;

    // Simulated API failure for the error-handling test.
    if (model === 'error-model') {
      res.status(404).set('x-request-id', 'mock-request-123').end('Model not found');
      return;
    }

    const candidate = {
      content: {
        parts: [{ text: 'Mock response from Google GenAI!' }],
        role: 'model',
      },
      finishReason: 'stop',
      index: 0,
    };

    // Fixed token counts (8/12/20) are asserted by the integration test.
    res.send({
      candidates: [candidate],
      usageMetadata: {
        promptTokenCount: 8,
        candidatesTokenCount: 12,
        totalTokenCount: 20,
      },
    });
  });

  return app.listen(PORT);
}

/**
 * Drives the Google GenAI client against the local mock server inside a
 * single `main` span, exercising the three paths the integration test
 * asserts on:
 *   1. chats.create + chat.sendMessage (model inherited from chat context)
 *   2. models.generateContent (happy path)
 *   3. models.generateContent with "error-model" (mock returns 404)
 */
async function run() {
  const server = startMockGoogleGenAIServer();

  await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
    const client = new GoogleGenAI({
      apiKey: 'mock-api-key',
      httpOptions: { baseUrl: `http://localhost:${PORT}` },
    });

    // Test 1: chats.create and sendMessage flow
    const chat = client.chats.create({
      model: 'gemini-1.5-pro',
      config: {
        temperature: 0.8,
        topP: 0.9,
        maxOutputTokens: 150,
      },
      history: [
        {
          role: 'user',
          parts: [{ text: 'Hello, how are you?' }],
        },
      ],
    });

    await chat.sendMessage({
      message: 'Tell me a joke',
    });

    // Test 2: models.generateContent
    await client.models.generateContent({
      model: 'gemini-1.5-flash',
      config: {
        temperature: 0.7,
        topP: 0.9,
        maxOutputTokens: 100,
      },
      contents: [
        {
          role: 'user',
          parts: [{ text: 'What is the capital of France?' }],
        },
      ],
    });

    // Test 3: Error handling — the mock server responds 404 for this model.
    try {
      await client.models.generateContent({
        model: 'error-model',
        contents: [
          {
            role: 'user',
            parts: [{ text: 'This will fail' }],
          },
        ],
      });
    } catch {
      // Expected error; binding omitted via ES2019 optional catch binding
      // since the error value is never inspected.
    }
  });

  server.close();
}

run();
Original file line number Diff line number Diff line change
@@ -0,0 +1,205 @@
import { afterAll, describe, expect } from 'vitest';
import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../utils/runner';

// Integration tests for the Sentry Google GenAI instrumentation. Each
// expectation object below describes the transaction the scenario
// (scenario.mjs) is expected to emit under a particular instrument config.
// The token counts (8/12/20) match the fixed usageMetadata served by the
// mock server in scenario.mjs.
describe('Google GenAI integration', () => {
  afterAll(() => {
    cleanupChildProcesses();
  });

  // With sendDefaultPii: false, spans must carry operation/model/parameter
  // attributes and token usage, but NO message or response content
  // (no 'gen_ai.request.messages' / 'gen_ai.response.text'). Note the exact
  // `data:` match (not objectContaining) enforces that absence.
  const EXPECTED_TRANSACTION_DEFAULT_PII_FALSE = {
    transaction: 'main',
    spans: expect.arrayContaining([
      // First span - chats.create
      expect.objectContaining({
        data: {
          'gen_ai.operation.name': 'chat',
          'sentry.op': 'gen_ai.chat',
          'sentry.origin': 'auto.ai.google_genai',
          'gen_ai.system': 'google_genai',
          'gen_ai.request.model': 'gemini-1.5-pro',
          'gen_ai.request.temperature': 0.8,
          'gen_ai.request.top_p': 0.9,
          'gen_ai.request.max_tokens': 150,
        },
        description: 'chat gemini-1.5-pro create',
        op: 'gen_ai.chat',
        origin: 'auto.ai.google_genai',
        status: 'ok',
      }),
      // Second span - chat.sendMessage (should get model from context)
      expect.objectContaining({
        data: {
          'gen_ai.operation.name': 'chat',
          'sentry.op': 'gen_ai.chat',
          'sentry.origin': 'auto.ai.google_genai',
          'gen_ai.system': 'google_genai',
          'gen_ai.request.model': 'gemini-1.5-pro', // Should get from chat context
          'gen_ai.usage.input_tokens': 8,
          'gen_ai.usage.output_tokens': 12,
          'gen_ai.usage.total_tokens': 20,
        },
        description: 'chat gemini-1.5-pro',
        op: 'gen_ai.chat',
        origin: 'auto.ai.google_genai',
        status: 'ok',
      }),
      // Third span - models.generateContent
      expect.objectContaining({
        data: {
          'gen_ai.operation.name': 'models',
          'sentry.op': 'gen_ai.models',
          'sentry.origin': 'auto.ai.google_genai',
          'gen_ai.system': 'google_genai',
          'gen_ai.request.model': 'gemini-1.5-flash',
          'gen_ai.request.temperature': 0.7,
          'gen_ai.request.top_p': 0.9,
          'gen_ai.request.max_tokens': 100,
          'gen_ai.usage.input_tokens': 8,
          'gen_ai.usage.output_tokens': 12,
          'gen_ai.usage.total_tokens': 20,
        },
        description: 'models gemini-1.5-flash',
        op: 'gen_ai.models',
        origin: 'auto.ai.google_genai',
        status: 'ok',
      }),
      // Fourth span - error handling (mock server returns 404 for this model)
      expect.objectContaining({
        data: {
          'gen_ai.operation.name': 'models',
          'sentry.op': 'gen_ai.models',
          'sentry.origin': 'auto.ai.google_genai',
          'gen_ai.system': 'google_genai',
          'gen_ai.request.model': 'error-model',
        },
        description: 'models error-model',
        op: 'gen_ai.models',
        origin: 'auto.ai.google_genai',
        status: 'unknown_error',
      }),
    ]),
  };

  // With sendDefaultPii: true, the same spans should additionally capture
  // request messages and (on responses) response text. objectContaining is
  // used here because only presence/type of the PII fields is asserted.
  const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE = {
    transaction: 'main',
    spans: expect.arrayContaining([
      // First span - chats.create with PII
      expect.objectContaining({
        data: expect.objectContaining({
          'gen_ai.operation.name': 'chat',
          'sentry.op': 'gen_ai.chat',
          'sentry.origin': 'auto.ai.google_genai',
          'gen_ai.system': 'google_genai',
          'gen_ai.request.model': 'gemini-1.5-pro',
          'gen_ai.request.temperature': 0.8,
          'gen_ai.request.top_p': 0.9,
          'gen_ai.request.max_tokens': 150,
          'gen_ai.request.messages': expect.any(String), // Should include history when recordInputs: true
        }),
        description: 'chat gemini-1.5-pro create',
        op: 'gen_ai.chat',
        origin: 'auto.ai.google_genai',
        status: 'ok',
      }),
      // Second span - chat.sendMessage with PII
      expect.objectContaining({
        data: expect.objectContaining({
          'gen_ai.operation.name': 'chat',
          'sentry.op': 'gen_ai.chat',
          'sentry.origin': 'auto.ai.google_genai',
          'gen_ai.system': 'google_genai',
          'gen_ai.request.model': 'gemini-1.5-pro',
          'gen_ai.request.messages': expect.any(String), // Should include message when recordInputs: true
          'gen_ai.response.text': expect.any(String), // Should include response when recordOutputs: true
          'gen_ai.usage.input_tokens': 8,
          'gen_ai.usage.output_tokens': 12,
          'gen_ai.usage.total_tokens': 20,
        }),
        description: 'chat gemini-1.5-pro',
        op: 'gen_ai.chat',
        origin: 'auto.ai.google_genai',
        status: 'ok',
      }),
      // Third span - models.generateContent with PII
      expect.objectContaining({
        data: expect.objectContaining({
          'gen_ai.operation.name': 'models',
          'sentry.op': 'gen_ai.models',
          'sentry.origin': 'auto.ai.google_genai',
          'gen_ai.system': 'google_genai',
          'gen_ai.request.model': 'gemini-1.5-flash',
          'gen_ai.request.temperature': 0.7,
          'gen_ai.request.top_p': 0.9,
          'gen_ai.request.max_tokens': 100,
          'gen_ai.request.messages': expect.any(String), // Should include contents when recordInputs: true
          'gen_ai.response.text': expect.any(String), // Should include response when recordOutputs: true
          'gen_ai.usage.input_tokens': 8,
          'gen_ai.usage.output_tokens': 12,
          'gen_ai.usage.total_tokens': 20,
        }),
        description: 'models gemini-1.5-flash',
        op: 'gen_ai.models',
        origin: 'auto.ai.google_genai',
        status: 'ok',
      }),
      // Fourth span - error handling with PII (request is still recorded even
      // though the call fails)
      expect.objectContaining({
        data: expect.objectContaining({
          'gen_ai.operation.name': 'models',
          'sentry.op': 'gen_ai.models',
          'sentry.origin': 'auto.ai.google_genai',
          'gen_ai.system': 'google_genai',
          'gen_ai.request.model': 'error-model',
          'gen_ai.request.messages': expect.any(String), // Should include contents when recordInputs: true
        }),
        description: 'models error-model',
        op: 'gen_ai.models',
        origin: 'auto.ai.google_genai',
        status: 'unknown_error',
      }),
    ]),
  };

  // With sendDefaultPii: false but explicit recordInputs/recordOutputs: true
  // (instrument-with-options.mjs), the integration options must win over the
  // global PII setting.
  const EXPECTED_TRANSACTION_WITH_OPTIONS = {
    transaction: 'main',
    spans: expect.arrayContaining([
      // Check that custom options are respected
      expect.objectContaining({
        data: expect.objectContaining({
          'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true
          'gen_ai.response.text': expect.any(String), // Should include response text when recordOutputs: true
        }),
      }),
    ]),
  };

  createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument.mjs', (createRunner, test) => {
    test('creates google genai related spans with sendDefaultPii: false', async () => {
      await createRunner()
        .ignore('event')
        .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_FALSE })
        .start()
        .completed();
    });
  });

  createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument-with-pii.mjs', (createRunner, test) => {
    test('creates google genai related spans with sendDefaultPii: true', async () => {
      await createRunner()
        .ignore('event')
        .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_TRUE })
        .start()
        .completed();
    });
  });

  createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument-with-options.mjs', (createRunner, test) => {
    test('creates google genai related spans with custom options', async () => {
      await createRunner()
        .ignore('event')
        .expect({ transaction: EXPECTED_TRANSACTION_WITH_OPTIONS })
        .start()
        .completed();
    });
  });
});
1 change: 1 addition & 0 deletions packages/astro/src/index.server.ts
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ export {
anthropicAIIntegration,
// eslint-disable-next-line deprecation/deprecation
anrIntegration,
googleGenAIIntegration,
// eslint-disable-next-line deprecation/deprecation
disableAnrDetectionForCallback,
captureCheckIn,
Expand Down
1 change: 1 addition & 0 deletions packages/aws-serverless/src/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -125,6 +125,7 @@ export {
profiler,
amqplibIntegration,
anthropicAIIntegration,
googleGenAIIntegration,
vercelAIIntegration,
logger,
consoleLoggingIntegration,
Expand Down
Loading