diff --git a/dev-packages/node-integration-tests/package.json b/dev-packages/node-integration-tests/package.json index 3deeb1ae0df4..15dd3b68d3a8 100644 --- a/dev-packages/node-integration-tests/package.json +++ b/dev-packages/node-integration-tests/package.json @@ -24,6 +24,7 @@ }, "dependencies": { "@aws-sdk/client-s3": "^3.552.0", + "@google/genai": "^1.20.0", "@hapi/hapi": "^21.3.10", "@nestjs/common": "11.1.3", "@nestjs/core": "11.1.3", diff --git a/dev-packages/node-integration-tests/suites/tracing/google-genai/instrument-with-options.mjs b/dev-packages/node-integration-tests/suites/tracing/google-genai/instrument-with-options.mjs new file mode 100644 index 000000000000..9823f5680be3 --- /dev/null +++ b/dev-packages/node-integration-tests/suites/tracing/google-genai/instrument-with-options.mjs @@ -0,0 +1,23 @@ +import * as Sentry from '@sentry/node'; +import { loggingTransport } from '@sentry-internal/node-integration-tests'; + +Sentry.init({ + dsn: 'https://public@dsn.ingest.sentry.io/1337', + release: '1.0', + tracesSampleRate: 1.0, + sendDefaultPii: false, + transport: loggingTransport, + integrations: [ + Sentry.googleGenAIIntegration({ + recordInputs: true, + recordOutputs: true, + }), + ], + beforeSendTransaction: event => { + // Filter out mock express server transactions + if (event.transaction.includes('/v1beta/')) { + return null; + } + return event; + }, +}); diff --git a/dev-packages/node-integration-tests/suites/tracing/google-genai/instrument-with-pii.mjs b/dev-packages/node-integration-tests/suites/tracing/google-genai/instrument-with-pii.mjs new file mode 100644 index 000000000000..fa0a1136283d --- /dev/null +++ b/dev-packages/node-integration-tests/suites/tracing/google-genai/instrument-with-pii.mjs @@ -0,0 +1,17 @@ +import * as Sentry from '@sentry/node'; +import { loggingTransport } from '@sentry-internal/node-integration-tests'; + +Sentry.init({ + dsn: 'https://public@dsn.ingest.sentry.io/1337', + release: '1.0', + tracesSampleRate: 1.0, + sendDefaultPii: true, + transport: loggingTransport, + beforeSendTransaction: event => { + // Filter out mock express server transactions + if (event.transaction.includes('/v1beta/')) { + return null; + } + return event; + }, +}); diff --git a/dev-packages/node-integration-tests/suites/tracing/google-genai/instrument.mjs b/dev-packages/node-integration-tests/suites/tracing/google-genai/instrument.mjs new file mode 100644 index 000000000000..9bcfb96ac103 --- /dev/null +++ b/dev-packages/node-integration-tests/suites/tracing/google-genai/instrument.mjs @@ -0,0 +1,17 @@ +import * as Sentry from '@sentry/node'; +import { loggingTransport } from '@sentry-internal/node-integration-tests'; + +Sentry.init({ + dsn: 'https://public@dsn.ingest.sentry.io/1337', + release: '1.0', + tracesSampleRate: 1.0, + sendDefaultPii: false, + transport: loggingTransport, + beforeSendTransaction: event => { + // Filter out mock express server transactions + if (event.transaction.includes('/v1beta')) { + return null; + } + return event; + }, +}); diff --git a/dev-packages/node-integration-tests/suites/tracing/google-genai/scenario.mjs b/dev-packages/node-integration-tests/suites/tracing/google-genai/scenario.mjs new file mode 100644 index 000000000000..cfae135b6878 --- /dev/null +++ b/dev-packages/node-integration-tests/suites/tracing/google-genai/scenario.mjs @@ -0,0 +1,109 @@ +import { GoogleGenAI } from '@google/genai'; +import * as Sentry from '@sentry/node'; +import express from 'express'; + +const PORT = 3333; + +function 
startMockGoogleGenAIServer() { + const app = express(); + app.use(express.json()); + + app.post('/v1beta/models/:model\\:generateContent', (req, res) => { + const model = req.params.model; + + if (model === 'error-model') { + res.status(404).set('x-request-id', 'mock-request-123').end('Model not found'); + return; + } + + res.send({ + candidates: [ + { + content: { + parts: [ + { + text: 'Mock response from Google GenAI!', + }, + ], + role: 'model', + }, + finishReason: 'stop', + index: 0, + }, + ], + usageMetadata: { + promptTokenCount: 8, + candidatesTokenCount: 12, + totalTokenCount: 20, + }, + }); + }); + + return app.listen(PORT); +} + +async function run() { + const server = startMockGoogleGenAIServer(); + + await Sentry.startSpan({ op: 'function', name: 'main' }, async () => { + const client = new GoogleGenAI({ + apiKey: 'mock-api-key', + httpOptions: { baseUrl: `http://localhost:${PORT}` }, + }); + + // Test 1: chats.create and sendMessage flow + const chat = client.chats.create({ + model: 'gemini-1.5-pro', + config: { + temperature: 0.8, + topP: 0.9, + maxOutputTokens: 150, + }, + history: [ + { + role: 'user', + parts: [{ text: 'Hello, how are you?' }], + }, + ], + }); + + await chat.sendMessage({ + message: 'Tell me a joke', + }); + + // Test 2: models.generateContent + await client.models.generateContent({ + model: 'gemini-1.5-flash', + config: { + temperature: 0.7, + topP: 0.9, + maxOutputTokens: 100, + }, + contents: [ + { + role: 'user', + parts: [{ text: 'What is the capital of France?' }], + }, + ], + }); + + // Test 3: Error handling + try { + await client.models.generateContent({ + model: 'error-model', + contents: [ + { + role: 'user', + parts: [{ text: 'This will fail' }], + }, + ], + }); + } catch (error) { + // Expected error + } + }); + + server.close(); +} + +run(); diff --git a/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts b/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts new file mode 100644 index 000000000000..9aa5523c61d7 --- /dev/null +++ b/dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts @@ -0,0 +1,205 @@ +import { afterAll, describe, expect } from 'vitest'; +import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../utils/runner'; + +describe('Google GenAI integration', () => { + afterAll(() => { + cleanupChildProcesses(); + }); + + const EXPECTED_TRANSACTION_DEFAULT_PII_FALSE = { + transaction: 'main', + spans: expect.arrayContaining([ + // First span - chats.create + expect.objectContaining({ + data: { + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', + 'sentry.origin': 'auto.ai.google_genai', + 'gen_ai.system': 'google_genai', + 'gen_ai.request.model': 'gemini-1.5-pro', + 'gen_ai.request.temperature': 0.8, + 'gen_ai.request.top_p': 0.9, + 'gen_ai.request.max_tokens': 150, + }, + description: 'chat gemini-1.5-pro create', + op: 'gen_ai.chat', + origin: 'auto.ai.google_genai', + status: 'ok', + }), + // Second span - chat.sendMessage (should get model from context) + expect.objectContaining({ + data: { + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', + 'sentry.origin': 'auto.ai.google_genai', + 'gen_ai.system': 'google_genai', + 'gen_ai.request.model': 'gemini-1.5-pro', // Should get from chat context + 'gen_ai.usage.input_tokens': 8, + 'gen_ai.usage.output_tokens': 12, + 'gen_ai.usage.total_tokens': 20, + }, + description: 'chat gemini-1.5-pro', + op: 'gen_ai.chat', + origin: 'auto.ai.google_genai', + status: 'ok', + }), + // Third span - 
models.generateContent + expect.objectContaining({ + data: { + 'gen_ai.operation.name': 'models', + 'sentry.op': 'gen_ai.models', + 'sentry.origin': 'auto.ai.google_genai', + 'gen_ai.system': 'google_genai', + 'gen_ai.request.model': 'gemini-1.5-flash', + 'gen_ai.request.temperature': 0.7, + 'gen_ai.request.top_p': 0.9, + 'gen_ai.request.max_tokens': 100, + 'gen_ai.usage.input_tokens': 8, + 'gen_ai.usage.output_tokens': 12, + 'gen_ai.usage.total_tokens': 20, + }, + description: 'models gemini-1.5-flash', + op: 'gen_ai.models', + origin: 'auto.ai.google_genai', + status: 'ok', + }), + // Fourth span - error handling + expect.objectContaining({ + data: { + 'gen_ai.operation.name': 'models', + 'sentry.op': 'gen_ai.models', + 'sentry.origin': 'auto.ai.google_genai', + 'gen_ai.system': 'google_genai', + 'gen_ai.request.model': 'error-model', + }, + description: 'models error-model', + op: 'gen_ai.models', + origin: 'auto.ai.google_genai', + status: 'unknown_error', + }), + ]), + }; + + const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE = { + transaction: 'main', + spans: expect.arrayContaining([ + // First span - chats.create with PII + expect.objectContaining({ + data: expect.objectContaining({ + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', + 'sentry.origin': 'auto.ai.google_genai', + 'gen_ai.system': 'google_genai', + 'gen_ai.request.model': 'gemini-1.5-pro', + 'gen_ai.request.temperature': 0.8, + 'gen_ai.request.top_p': 0.9, + 'gen_ai.request.max_tokens': 150, + 'gen_ai.request.messages': expect.any(String), // Should include history when recordInputs: true + }), + description: 'chat gemini-1.5-pro create', + op: 'gen_ai.chat', + origin: 'auto.ai.google_genai', + status: 'ok', + }), + // Second span - chat.sendMessage with PII + expect.objectContaining({ + data: expect.objectContaining({ + 'gen_ai.operation.name': 'chat', + 'sentry.op': 'gen_ai.chat', + 'sentry.origin': 'auto.ai.google_genai', + 'gen_ai.system': 'google_genai', + 'gen_ai.request.model': 'gemini-1.5-pro', + 'gen_ai.request.messages': expect.any(String), // Should include message when recordInputs: true + 'gen_ai.response.text': expect.any(String), // Should include response when recordOutputs: true + 'gen_ai.usage.input_tokens': 8, + 'gen_ai.usage.output_tokens': 12, + 'gen_ai.usage.total_tokens': 20, + }), + description: 'chat gemini-1.5-pro', + op: 'gen_ai.chat', + origin: 'auto.ai.google_genai', + status: 'ok', + }), + // Third span - models.generateContent with PII + expect.objectContaining({ + data: expect.objectContaining({ + 'gen_ai.operation.name': 'models', + 'sentry.op': 'gen_ai.models', + 'sentry.origin': 'auto.ai.google_genai', + 'gen_ai.system': 'google_genai', + 'gen_ai.request.model': 'gemini-1.5-flash', + 'gen_ai.request.temperature': 0.7, + 'gen_ai.request.top_p': 0.9, + 'gen_ai.request.max_tokens': 100, + 'gen_ai.request.messages': expect.any(String), // Should include contents when recordInputs: true + 'gen_ai.response.text': expect.any(String), // Should include response when recordOutputs: true + 'gen_ai.usage.input_tokens': 8, + 'gen_ai.usage.output_tokens': 12, + 'gen_ai.usage.total_tokens': 20, + }), + description: 'models gemini-1.5-flash', + op: 'gen_ai.models', + origin: 'auto.ai.google_genai', + status: 'ok', + }), + // Fourth span - error handling with PII + expect.objectContaining({ + data: expect.objectContaining({ + 'gen_ai.operation.name': 'models', + 'sentry.op': 'gen_ai.models', + 'sentry.origin': 'auto.ai.google_genai', + 'gen_ai.system': 'google_genai', + 'gen_ai.request.model': 
'error-model', + 'gen_ai.request.messages': expect.any(String), // Should include contents when recordInputs: true + }), + description: 'models error-model', + op: 'gen_ai.models', + origin: 'auto.ai.google_genai', + status: 'unknown_error', + }), + ]), + }; + + const EXPECTED_TRANSACTION_WITH_OPTIONS = { + transaction: 'main', + spans: expect.arrayContaining([ + // Check that custom options are respected + expect.objectContaining({ + data: expect.objectContaining({ + 'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true + 'gen_ai.response.text': expect.any(String), // Should include response text when recordOutputs: true + }), + }), + ]), + }; + + createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument.mjs', (createRunner, test) => { + test('creates google genai related spans with sendDefaultPii: false', async () => { + await createRunner() + .ignore('event') + .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_FALSE }) + .start() + .completed(); + }); + }); + + createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument-with-pii.mjs', (createRunner, test) => { + test('creates google genai related spans with sendDefaultPii: true', async () => { + await createRunner() + .ignore('event') + .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_TRUE }) + .start() + .completed(); + }); + }); + + createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument-with-options.mjs', (createRunner, test) => { + test('creates google genai related spans with custom options', async () => { + await createRunner() + .ignore('event') + .expect({ transaction: EXPECTED_TRANSACTION_WITH_OPTIONS }) + .start() + .completed(); + }); + }); +}); diff --git a/packages/astro/src/index.server.ts b/packages/astro/src/index.server.ts index 5abf8d51633d..de4079c4b5c4 100644 --- a/packages/astro/src/index.server.ts +++ b/packages/astro/src/index.server.ts @@ -15,6 +15,7 @@ export { anthropicAIIntegration, // eslint-disable-next-line deprecation/deprecation anrIntegration, + googleGenAIIntegration, // eslint-disable-next-line deprecation/deprecation disableAnrDetectionForCallback, captureCheckIn, diff --git a/packages/aws-serverless/src/index.ts b/packages/aws-serverless/src/index.ts index 541f8a97a410..0cbe5879b02e 100644 --- a/packages/aws-serverless/src/index.ts +++ b/packages/aws-serverless/src/index.ts @@ -125,6 +125,7 @@ export { profiler, amqplibIntegration, anthropicAIIntegration, + googleGenAIIntegration, vercelAIIntegration, logger, consoleLoggingIntegration, diff --git a/packages/bun/src/index.ts b/packages/bun/src/index.ts index bc5bf37c0de4..b1c4854e5026 100644 --- a/packages/bun/src/index.ts +++ b/packages/bun/src/index.ts @@ -143,6 +143,7 @@ export { profiler, amqplibIntegration, anthropicAIIntegration, + googleGenAIIntegration, vercelAIIntegration, logger, consoleLoggingIntegration, diff --git a/packages/core/src/index.ts b/packages/core/src/index.ts index b971aa8b43a3..b4c37b312e80 100644 --- a/packages/core/src/index.ts +++ b/packages/core/src/index.ts @@ -131,6 +131,8 @@ export { instrumentOpenAiClient } from './utils/openai'; export { OPENAI_INTEGRATION_NAME } from './utils/openai/constants'; export { instrumentAnthropicAiClient } from './utils/anthropic-ai'; export { ANTHROPIC_AI_INTEGRATION_NAME } from './utils/anthropic-ai/constants'; +export { instrumentGoogleGenAIClient } from './utils/google-genai'; +export { GOOGLE_GENAI_INTEGRATION_NAME } from './utils/google-genai/constants'; export type { OpenAiClient, OpenAiOptions, InstrumentedMethod } from 
'./utils/openai/types'; export type { AnthropicAiClient, @@ -138,6 +140,12 @@ export type { AnthropicAiInstrumentedMethod, AnthropicAiResponse, } from './utils/anthropic-ai/types'; +export type { + GoogleGenAIClient, + GoogleGenAIChat, + GoogleGenAIOptions, + GoogleGenAIIstrumentedMethod, +} from './utils/google-genai/types'; export type { FeatureFlag } from './utils/featureFlags'; export { @@ -207,6 +215,7 @@ export { basename, dirname, isAbsolute, join, normalizePath, relative, resolve } export { makePromiseBuffer, SENTRY_BUFFER_FULL_ERROR } from './utils/promisebuffer'; export type { PromiseBuffer } from './utils/promisebuffer'; export { severityLevelFromString } from './utils/severity'; +export { replaceExports } from './utils/exports'; export { UNKNOWN_FUNCTION, createStackParser, diff --git a/packages/core/src/utils/ai/utils.ts b/packages/core/src/utils/ai/utils.ts index 2a2952ce6ad8..ecb46d5f0d0d 100644 --- a/packages/core/src/utils/ai/utils.ts +++ b/packages/core/src/utils/ai/utils.ts @@ -20,6 +20,9 @@ export function getFinalOperationName(methodPath: string): string { if (methodPath.includes('models')) { return 'models'; } + if (methodPath.includes('chat')) { + return 'chat'; + } return methodPath.split('.').pop() || 'unknown'; } diff --git a/packages/core/src/utils/exports.ts b/packages/core/src/utils/exports.ts new file mode 100644 index 000000000000..588e758e88f9 --- /dev/null +++ b/packages/core/src/utils/exports.ts @@ -0,0 +1,47 @@ +/** + * Replaces constructor functions in module exports, handling read-only properties, + * and both default and named exports by wrapping them with the constructor. + * + * @param exports The module exports object to modify + * @param exportName The name of the export to replace (e.g., 'GoogleGenAI', 'Anthropic', 'OpenAI') + * @param wrappedConstructor The wrapped constructor function to replace the original with + * @returns void + */ +export function replaceExports( + exports: { [key: string]: unknown }, + exportName: string, + wrappedConstructor: unknown, +): void { + const original = exports[exportName]; + + if (typeof original !== 'function') { + return; + } + + // Replace the named export - handle read-only properties + try { + exports[exportName] = wrappedConstructor; + } catch (error) { + // If direct assignment fails, override the property descriptor + Object.defineProperty(exports, exportName, { + value: wrappedConstructor, + writable: true, + configurable: true, + enumerable: true, + }); + } + + // Replace the default export if it points to the original constructor + if (exports.default === original) { + try { + exports.default = wrappedConstructor; + } catch (error) { + Object.defineProperty(exports, 'default', { + value: wrappedConstructor, + writable: true, + configurable: true, + enumerable: true, + }); + } + } +} diff --git a/packages/core/src/utils/google-genai/constants.ts b/packages/core/src/utils/google-genai/constants.ts new file mode 100644 index 000000000000..8617460482c6 --- /dev/null +++ b/packages/core/src/utils/google-genai/constants.ts @@ -0,0 +1,10 @@ +export const GOOGLE_GENAI_INTEGRATION_NAME = 'Google_GenAI'; + +// https://ai.google.dev/api/rest/v1/models/generateContent +// https://ai.google.dev/api/rest/v1/chats/sendMessage +export const GOOGLE_GENAI_INSTRUMENTED_METHODS = ['models.generateContent', 'chats.create', 'sendMessage'] as const; + +// Constants for internal use +export const GOOGLE_GENAI_SYSTEM_NAME = 'google_genai'; +export const CHATS_CREATE_METHOD = 'chats.create'; +export const CHAT_PATH = 'chat'; 
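Note for reviewers (not part of the diff): the `chat` branch added to `getFinalOperationName` above, together with these constants, is what drives the span ops and descriptions asserted in `test.ts` earlier in this diff. A small standalone sketch of that mapping follows; it re-implements the helper purely for illustration rather than importing the internal `@sentry/core` utility.

```typescript
// Mirrors the methodPath -> operation mapping from getFinalOperationName above.
const operationFor = (methodPath: string): string => {
  if (methodPath.includes('models')) return 'models';
  if (methodPath.includes('chat')) return 'chat';
  return methodPath.split('.').pop() || 'unknown';
};

// The span op is `gen_ai.<operation>` and the description appends the resolved model
// (plus `create` for the synchronous chats.create case), matching the test expectations:
// 'chats.create'           -> op 'gen_ai.chat',   description 'chat gemini-1.5-pro create'
// 'chat.sendMessage'       -> op 'gen_ai.chat',   description 'chat gemini-1.5-pro'
// 'models.generateContent' -> op 'gen_ai.models', description 'models gemini-1.5-flash'
console.log(operationFor('chats.create'), operationFor('chat.sendMessage'), operationFor('models.generateContent'));
// -> chat chat models
```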
diff --git a/packages/core/src/utils/google-genai/index.ts b/packages/core/src/utils/google-genai/index.ts new file mode 100644 index 000000000000..cdad221ac60f --- /dev/null +++ b/packages/core/src/utils/google-genai/index.ts @@ -0,0 +1,315 @@ +import { getClient } from '../../currentScopes'; +import { captureException } from '../../exports'; +import { SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN } from '../../semanticAttributes'; +import { startSpan } from '../../tracing/trace'; +import type { Span, SpanAttributeValue } from '../../types-hoist/span'; +import { + GEN_AI_OPERATION_NAME_ATTRIBUTE, + GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTRIBUTE, + GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE, + GEN_AI_REQUEST_MESSAGES_ATTRIBUTE, + GEN_AI_REQUEST_MODEL_ATTRIBUTE, + GEN_AI_REQUEST_PRESENCE_PENALTY_ATTRIBUTE, + GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE, + GEN_AI_REQUEST_TOP_K_ATTRIBUTE, + GEN_AI_REQUEST_TOP_P_ATTRIBUTE, + GEN_AI_RESPONSE_TEXT_ATTRIBUTE, + GEN_AI_SYSTEM_ATTRIBUTE, + GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE, + GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE, +} from '../ai/gen-ai-attributes'; +import { buildMethodPath, getFinalOperationName, getSpanOperation } from '../ai/utils'; +import { handleCallbackErrors } from '../handleCallbackErrors'; +import { CHAT_PATH, CHATS_CREATE_METHOD, GOOGLE_GENAI_SYSTEM_NAME } from './constants'; +import type { + Candidate, + ContentPart, + GoogleGenAIIstrumentedMethod, + GoogleGenAIOptions, + GoogleGenAIResponse, +} from './types'; +import { shouldInstrument } from './utils'; + +/** + * Extract model from parameters or chat context object + * For chat instances, the model is available on the chat object as 'model' (older versions) or 'modelVersion' (newer versions) + */ +export function extractModel(params: Record<string, unknown>, context?: unknown): string { + if ('model' in params && typeof params.model === 'string') { + return params.model; + } + + // Try to get model from chat context object (chat instance has model property) + if (context && typeof context === 'object') { + const contextObj = context as Record<string, unknown>; + + // Check for 'model' property (older versions, and streaming) + if ('model' in contextObj && typeof contextObj.model === 'string') { + return contextObj.model; + } + + // Check for 'modelVersion' property (newer versions) + if ('modelVersion' in contextObj && typeof contextObj.modelVersion === 'string') { + return contextObj.modelVersion; + } + } + + return 'unknown'; +} + +/** + * Extract generation config parameters + */ +function extractConfigAttributes(config: Record<string, unknown>): Record<string, SpanAttributeValue> { + const attributes: Record<string, SpanAttributeValue> = {}; + + if ('temperature' in config && typeof config.temperature === 'number') { + attributes[GEN_AI_REQUEST_TEMPERATURE_ATTRIBUTE] = config.temperature; + } + if ('topP' in config && typeof config.topP === 'number') { + attributes[GEN_AI_REQUEST_TOP_P_ATTRIBUTE] = config.topP; + } + if ('topK' in config && typeof config.topK === 'number') { + attributes[GEN_AI_REQUEST_TOP_K_ATTRIBUTE] = config.topK; + } + if ('maxOutputTokens' in config && typeof config.maxOutputTokens === 'number') { + attributes[GEN_AI_REQUEST_MAX_TOKENS_ATTRIBUTE] = config.maxOutputTokens; + } + if ('frequencyPenalty' in config && typeof config.frequencyPenalty === 'number') { + attributes[GEN_AI_REQUEST_FREQUENCY_PENALTY_ATTRIBUTE] = config.frequencyPenalty; + } + if ('presencePenalty' in config && typeof config.presencePenalty === 'number') { + attributes[GEN_AI_REQUEST_PRESENCE_PENALTY_ATTRIBUTE] = config.presencePenalty; + } + + return attributes; +} + +/** + * Extract
request attributes from method arguments + * Builds the base attributes for span creation including system info, model, and config + */ +function extractRequestAttributes( + args: unknown[], + methodPath: string, + context?: unknown, +): Record<string, unknown> { + const attributes: Record<string, unknown> = { + [GEN_AI_SYSTEM_ATTRIBUTE]: GOOGLE_GENAI_SYSTEM_NAME, + [GEN_AI_OPERATION_NAME_ATTRIBUTE]: getFinalOperationName(methodPath), + [SEMANTIC_ATTRIBUTE_SENTRY_ORIGIN]: 'auto.ai.google_genai', + }; + + if (args.length > 0 && typeof args[0] === 'object' && args[0] !== null) { + const params = args[0] as Record<string, unknown>; + + attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE] = extractModel(params, context); + + // Extract generation config parameters + if ('config' in params && typeof params.config === 'object' && params.config) { + Object.assign(attributes, extractConfigAttributes(params.config as Record<string, unknown>)); + } + } else { + attributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE] = extractModel({}, context); + } + + return attributes; +} + +/** + * Add private request attributes to spans. + * This is only recorded if recordInputs is true. + * Handles different parameter formats for different Google GenAI methods. + */ +function addPrivateRequestAttributes(span: Span, params: Record<string, unknown>): void { + // For models.generateContent: ContentListUnion: Content | Content[] | PartUnion | PartUnion[] + if ('contents' in params) { + span.setAttributes({ [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: JSON.stringify(params.contents) }); + } + + // For chat.sendMessage: message can be string or Part[] + if ('message' in params) { + span.setAttributes({ [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: JSON.stringify(params.message) }); + } + + // For chats.create: history contains the conversation history + if ('history' in params) { + span.setAttributes({ [GEN_AI_REQUEST_MESSAGES_ATTRIBUTE]: JSON.stringify(params.history) }); + } +} + +/** + * Add response attributes from the Google GenAI response + * @see https://github.com/googleapis/js-genai/blob/v1.19.0/src/types.ts#L2313 + */ +function addResponseAttributes(span: Span, response: GoogleGenAIResponse, recordOutputs?: boolean): void { + if (!response || typeof response !== 'object') return; + + // Add usage metadata if present + if (response.usageMetadata && typeof response.usageMetadata === 'object') { + const usage = response.usageMetadata; + if (typeof usage.promptTokenCount === 'number') { + span.setAttributes({ + [GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE]: usage.promptTokenCount, + }); + } + if (typeof usage.candidatesTokenCount === 'number') { + span.setAttributes({ + [GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE]: usage.candidatesTokenCount, + }); + } + if (typeof usage.totalTokenCount === 'number') { + span.setAttributes({ + [GEN_AI_USAGE_TOTAL_TOKENS_ATTRIBUTE]: usage.totalTokenCount, + }); + } + } + + // Add response text if recordOutputs is enabled + if (recordOutputs && Array.isArray(response.candidates) && response.candidates.length > 0) { + const responseTexts = response.candidates + .map((candidate: Candidate) => { + if (candidate.content?.parts && Array.isArray(candidate.content.parts)) { + return candidate.content.parts + .map((part: ContentPart) => (typeof part.text === 'string' ?
part.text : '')) + .filter((text: string) => text.length > 0) + .join(''); + } + return ''; + }) + .filter((text: string) => text.length > 0); + + if (responseTexts.length > 0) { + span.setAttributes({ + [GEN_AI_RESPONSE_TEXT_ATTRIBUTE]: responseTexts.join(''), + }); + } + } +} + +/** + * Instrument any async or synchronous genai method with Sentry spans + * Handles operations like models.generateContent and chat.sendMessage and chats.create + * @see https://docs.sentry.io/platforms/javascript/guides/node/tracing/instrumentation/ai-agents-module/#manual-instrumentation + */ +function instrumentMethod<T extends unknown[], R>( + originalMethod: (...args: T) => R | Promise<R>, + methodPath: GoogleGenAIIstrumentedMethod, + context: unknown, + options: GoogleGenAIOptions, +): (...args: T) => R | Promise<R> { + const isSyncCreate = methodPath === CHATS_CREATE_METHOD; + + const run = (...args: T): R | Promise<R> => { + const requestAttributes = extractRequestAttributes(args, methodPath, context); + const model = requestAttributes[GEN_AI_REQUEST_MODEL_ATTRIBUTE] ?? 'unknown'; + const operationName = getFinalOperationName(methodPath); + + // Single span for both sync and async operations + return startSpan( + { + name: isSyncCreate ? `${operationName} ${model} create` : `${operationName} ${model}`, + op: getSpanOperation(methodPath), + attributes: requestAttributes, + }, + (span: Span) => { + if (options.recordInputs && args[0] && typeof args[0] === 'object') { + addPrivateRequestAttributes(span, args[0] as Record<string, unknown>); + } + + return handleCallbackErrors( + () => originalMethod.apply(context, args), + error => { + captureException(error, { + mechanism: { handled: false, type: 'auto.ai.google_genai', data: { function: methodPath } }, + }); + }, + () => {}, + result => { + // Only add response attributes for content-producing methods, not for chats.create + if (!isSyncCreate) { + addResponseAttributes(span, result, options.recordOutputs); + } + }, + ); + }, + ); + }; + + return run; +} + +/** + * Create a deep proxy for Google GenAI client instrumentation + * Recursively instruments methods and handles special cases like chats.create + */ +function createDeepProxy<T extends object>(target: T, currentPath = '', options: GoogleGenAIOptions): T { + return new Proxy(target, { + get: (t, prop, receiver) => { + const value = Reflect.get(t, prop, receiver); + const methodPath = buildMethodPath(currentPath, String(prop)); + + if (typeof value === 'function' && shouldInstrument(methodPath)) { + // Special case: chats.create is synchronous but needs both instrumentation AND result proxying + if (methodPath === CHATS_CREATE_METHOD) { + const instrumentedMethod = instrumentMethod(value as (...args: unknown[]) => unknown, methodPath, t, options); + return function instrumentedAndProxiedCreate(...args: unknown[]): unknown { + const result = instrumentedMethod(...args); + // If the result is an object (like a chat instance), proxy it too + if (result && typeof result === 'object') { + return createDeepProxy(result, CHAT_PATH, options); + } + return result; + }; + } + + return instrumentMethod(value as (...args: unknown[]) => Promise<unknown>, methodPath, t, options); + } + + if (typeof value === 'function') { + // Bind non-instrumented functions to preserve the original `this` context + return value.bind(t); + } + + if (value && typeof value === 'object') { + return createDeepProxy(value, methodPath, options); + } + + return value; + }, + }) as T; +} + +/** + * Instrument a Google GenAI client with Sentry tracing + * Can be used across Node.js, Cloudflare Workers, and Vercel
Edge + * + * @template T - The type of the client that extends client object + * @param client - The Google GenAI client to instrument + * @param options - Optional configuration for recording inputs and outputs + * @returns The instrumented client with the same type as the input + * + * @example + * ```typescript + * import { GoogleGenAI } from '@google/genai'; + * import { instrumentGoogleGenAIClient } from '@sentry/core'; + * + * const genAI = new GoogleGenAI({ apiKey: process.env.GOOGLE_GENAI_API_KEY }); + * const instrumentedClient = instrumentGoogleGenAIClient(genAI); + * + * // Now both chats.create and sendMessage will be instrumented + * const chat = instrumentedClient.chats.create({ model: 'gemini-1.5-pro' }); + * const response = await chat.sendMessage({ message: 'Hello' }); + * ``` + */ +export function instrumentGoogleGenAIClient<T extends object>(client: T, options?: GoogleGenAIOptions): T { + const sendDefaultPii = Boolean(getClient()?.getOptions().sendDefaultPii); + + const _options = { + recordInputs: sendDefaultPii, + recordOutputs: sendDefaultPii, + ...options, + }; + return createDeepProxy(client, '', _options); +} diff --git a/packages/core/src/utils/google-genai/types.ts b/packages/core/src/utils/google-genai/types.ts new file mode 100644 index 000000000000..9a2138a7843d --- /dev/null +++ b/packages/core/src/utils/google-genai/types.ts @@ -0,0 +1,185 @@ +import type { GOOGLE_GENAI_INSTRUMENTED_METHODS } from './constants'; + +export interface GoogleGenAIOptions { + /** + * Enable or disable input recording. + */ + recordInputs?: boolean; + /** + * Enable or disable output recording. + */ + recordOutputs?: boolean; +} + +/** + * Google GenAI Content Part + * @see https://ai.google.dev/api/rest/v1/Content#Part + * @see https://github.com/googleapis/js-genai/blob/v1.19.0/src/types.ts#L1061 + * + */ +export type ContentPart = { + /** Metadata for a given video. */ + videoMetadata?: unknown; + /** Indicates if the part is thought from the model. */ + thought?: boolean; + /** Optional. Inlined bytes data. */ + inlineData?: Blob; + /** Optional. URI based data. */ + fileData?: unknown; + /** An opaque signature for the thought so it can be reused in subsequent requests. + * @remarks Encoded as base64 string. */ + thoughtSignature?: string; + /** A predicted [FunctionCall] returned from the model that contains a string + representing the [FunctionDeclaration.name] and a structured JSON object + containing the parameters and their values. */ + functionCall?: { + /** The unique id of the function call. If populated, the client to execute the + `function_call` and return the response with the matching `id`. */ + id?: string; + /** Optional. The function parameters and values in JSON object format. See [FunctionDeclaration.parameters] for parameter details. */ + args?: Record<string, unknown>; + /** Required. The name of the function to call. Matches [FunctionDeclaration.name]. */ + name?: string; + }; + /** Optional. Result of executing the [ExecutableCode]. */ + codeExecutionResult?: unknown; + /** Optional. Code generated by the model that is meant to be executed. */ + executableCode?: unknown; + /** Optional. The result output of a [FunctionCall] that contains a string representing the [FunctionDeclaration.name] and a structured JSON object containing any output from the function call. It is used as context to the model. */ + functionResponse?: unknown; + /** Optional. Text part (can be code).
*/ + text?: string; +}; + +/** + * Google GenAI Content + * @see https://ai.google.dev/api/rest/v1/Content + */ +type Content = { + /** List of parts that constitute a single message. + * Each part may have a different IANA MIME type. */ + parts?: ContentPart[]; + /** Optional. The producer of the content. Must be either 'user' or + * 'model'. Useful to set for multi-turn conversations, otherwise can be + * empty. If role is not specified, SDK will determine the role. + */ + role?: string; +}; + +type MediaModality = 'MODALITY_UNSPECIFIED' | 'TEXT' | 'IMAGE' | 'VIDEO' | 'AUDIO' | 'DOCUMENT'; + +/** + * Google GenAI Modality Token Count + * @see https://ai.google.dev/api/rest/v1/ModalityTokenCount + */ +type ModalityTokenCount = { + /** The modality associated with this token count. */ + modality?: MediaModality; + /** Number of tokens. */ + tokenCount?: number; +}; + +/** + * Google GenAI Usage Metadata + * @see https://ai.google.dev/api/rest/v1/GenerateContentResponse#UsageMetadata + */ +type GenerateContentResponseUsageMetadata = { + [key: string]: unknown; + /** Output only. List of modalities of the cached content in the request input. */ + cacheTokensDetails?: ModalityTokenCount[]; + /** Output only. Number of tokens in the cached part in the input (the cached content). */ + cachedContentTokenCount?: number; + /** Number of tokens in the response(s). */ + candidatesTokenCount?: number; + /** Output only. List of modalities that were returned in the response. */ + candidatesTokensDetails?: ModalityTokenCount[]; + /** Number of tokens in the request. When `cached_content` is set, this is still the total effective prompt size meaning this includes the number of tokens in the cached content. */ + promptTokenCount?: number; + /** Output only. List of modalities that were processed in the request input. */ + promptTokensDetails?: ModalityTokenCount[]; + /** Output only. Number of tokens present in thoughts output. */ + thoughtsTokenCount?: number; + /** Output only. Number of tokens present in tool-use prompt(s). */ + toolUsePromptTokenCount?: number; + /** Output only. List of modalities that were processed for tool-use request inputs. */ + toolUsePromptTokensDetails?: ModalityTokenCount[]; + /** Total token count for prompt, response candidates, and tool-use prompts (if present). */ + totalTokenCount?: number; +}; + +/** + * Google GenAI Candidate + * @see https://ai.google.dev/api/rest/v1/Candidate + * https://github.com/googleapis/js-genai/blob/v1.19.0/src/types.ts#L2237 + */ +export type Candidate = { + [key: string]: unknown; + /** + * Contains the multi-part content of the response. + */ + content?: Content; + /** + * The reason why the model stopped generating tokens. + * If empty, the model has not stopped generating the tokens. + */ + finishReason?: string; + /** + * Number of tokens for this candidate. + */ + tokenCount?: number; + /** + * The index of the candidate. + */ + index?: number; +}; + +/** + * Google GenAI Generate Content Response + * @see https://ai.google.dev/api/rest/v1/GenerateContentResponse + */ +type GenerateContentResponse = { + [key: string]: unknown; + /** Response variations returned by the model. */ + candidates?: Candidate[]; + /** Timestamp when the request is made to the server. */ + automaticFunctionCallingHistory?: Content[]; + /** Output only. The model version used to generate the response. */ + modelVersion?: string; + /** Output only. Content filter results for a prompt sent in the request. Note: Sent only in the first stream chunk. 
Only happens when no candidates were generated due to content violations. */ + promptFeedback?: Record; + /** Output only. response_id is used to identify each response. It is the encoding of the event_id. */ + responseId?: string; + /** Usage metadata about the response(s). */ + usageMetadata?: GenerateContentResponseUsageMetadata; +}; + +/** + * Basic interface for Google GenAI client with only the instrumented methods + * This provides type safety while being generic enough to work with different client implementations + */ +export interface GoogleGenAIClient { + models: { + generateContent: (...args: unknown[]) => Promise; + // https://googleapis.github.io/js-genai/release_docs/classes/models.Models.html#generatecontentstream + // eslint-disable-next-line @typescript-eslint/no-explicit-any + generateContentStream: (...args: unknown[]) => Promise>; + }; + chats: { + create: (...args: unknown[]) => GoogleGenAIChat; + }; +} + +/** + * Google GenAI Chat interface for chat instances created via chats.create() + */ +export interface GoogleGenAIChat { + sendMessage: (...args: unknown[]) => Promise; + // https://googleapis.github.io/js-genai/release_docs/classes/chats.Chat.html#sendmessagestream + // eslint-disable-next-line @typescript-eslint/no-explicit-any + sendMessageStream: (...args: unknown[]) => Promise>; +} + +export type GoogleGenAIIstrumentedMethod = (typeof GOOGLE_GENAI_INSTRUMENTED_METHODS)[number]; + +// Export the response type for use in instrumentation +export type GoogleGenAIResponse = GenerateContentResponse; diff --git a/packages/core/src/utils/google-genai/utils.ts b/packages/core/src/utils/google-genai/utils.ts new file mode 100644 index 000000000000..c7a18477c7dd --- /dev/null +++ b/packages/core/src/utils/google-genai/utils.ts @@ -0,0 +1,16 @@ +import { GOOGLE_GENAI_INSTRUMENTED_METHODS } from './constants'; +import type { GoogleGenAIIstrumentedMethod } from './types'; + +/** + * Check if a method path should be instrumented + */ +export function shouldInstrument(methodPath: string): methodPath is GoogleGenAIIstrumentedMethod { + // Check for exact matches first (like 'models.generateContent') + if (GOOGLE_GENAI_INSTRUMENTED_METHODS.includes(methodPath as GoogleGenAIIstrumentedMethod)) { + return true; + } + + // Check for method name matches (like 'sendMessage' from chat instances) + const methodName = methodPath.split('.').pop(); + return GOOGLE_GENAI_INSTRUMENTED_METHODS.includes(methodName as GoogleGenAIIstrumentedMethod); +} diff --git a/packages/google-cloud-serverless/src/index.ts b/packages/google-cloud-serverless/src/index.ts index e8042e4260a8..fc0fe353b919 100644 --- a/packages/google-cloud-serverless/src/index.ts +++ b/packages/google-cloud-serverless/src/index.ts @@ -123,6 +123,7 @@ export { profiler, amqplibIntegration, anthropicAIIntegration, + googleGenAIIntegration, childProcessIntegration, createSentryWinstonTransport, vercelAIIntegration, diff --git a/packages/node/src/index.ts b/packages/node/src/index.ts index 84603db7e575..853ec8dbac2f 100644 --- a/packages/node/src/index.ts +++ b/packages/node/src/index.ts @@ -25,6 +25,7 @@ export { amqplibIntegration } from './integrations/tracing/amqplib'; export { vercelAIIntegration } from './integrations/tracing/vercelai'; export { openAIIntegration } from './integrations/tracing/openai'; export { anthropicAIIntegration } from './integrations/tracing/anthropic-ai'; +export { googleGenAIIntegration } from './integrations/tracing/google-genai'; export { launchDarklyIntegration, buildLaunchDarklyFlagUsedHandler, 
diff --git a/packages/node/src/integrations/tracing/google-genai/index.ts b/packages/node/src/integrations/tracing/google-genai/index.ts new file mode 100644 index 000000000000..5c1ad09d2fcd --- /dev/null +++ b/packages/node/src/integrations/tracing/google-genai/index.ts @@ -0,0 +1,73 @@ +import type { GoogleGenAIOptions, IntegrationFn } from '@sentry/core'; +import { defineIntegration, GOOGLE_GENAI_INTEGRATION_NAME } from '@sentry/core'; +import { generateInstrumentOnce } from '@sentry/node-core'; +import { SentryGoogleGenAiInstrumentation } from './instrumentation'; + +export const instrumentGoogleGenAI = generateInstrumentOnce<GoogleGenAIOptions>( + GOOGLE_GENAI_INTEGRATION_NAME, + options => new SentryGoogleGenAiInstrumentation(options), +); + +const _googleGenAIIntegration = ((options: GoogleGenAIOptions = {}) => { + return { + name: GOOGLE_GENAI_INTEGRATION_NAME, + setupOnce() { + instrumentGoogleGenAI(options); + }, + }; +}) satisfies IntegrationFn; + +/** + * Adds Sentry tracing instrumentation for the Google Generative AI SDK. + * + * This integration is enabled by default. + * + * When configured, this integration automatically instruments Google GenAI SDK client instances + * to capture telemetry data following OpenTelemetry Semantic Conventions for Generative AI. + * + * @example + * ```javascript + * import * as Sentry from '@sentry/node'; + * + * Sentry.init({ + * integrations: [Sentry.googleGenAIIntegration()], + * }); + * ``` + * + * ## Options + * + * - `recordInputs`: Whether to record prompt messages (default: respects `sendDefaultPii` client option) + * - `recordOutputs`: Whether to record response text (default: respects `sendDefaultPii` client option) + * + * ### Default Behavior + * + * By default, the integration will: + * - Record inputs and outputs ONLY if `sendDefaultPii` is set to `true` in your Sentry client options + * - Otherwise, inputs and outputs are NOT recorded unless explicitly enabled + * + * @example + * ```javascript + * // Record inputs and outputs when sendDefaultPii is false + * Sentry.init({ + * integrations: [ + * Sentry.googleGenAIIntegration({ + * recordInputs: true, + * recordOutputs: true + * }) + * ], + * }); + * + * // Never record inputs/outputs regardless of sendDefaultPii + * Sentry.init({ + * sendDefaultPii: true, + * integrations: [ + * Sentry.googleGenAIIntegration({ + * recordInputs: false, + * recordOutputs: false + * }) + * ], + * }); + * ``` + * + */ +export const googleGenAIIntegration = defineIntegration(_googleGenAIIntegration); diff --git a/packages/node/src/integrations/tracing/google-genai/instrumentation.ts b/packages/node/src/integrations/tracing/google-genai/instrumentation.ts new file mode 100644 index 000000000000..cfdb68973be6 --- /dev/null +++ b/packages/node/src/integrations/tracing/google-genai/instrumentation.ts @@ -0,0 +1,102 @@ +import type { InstrumentationConfig } from '@opentelemetry/instrumentation'; +import { + type InstrumentationModuleDefinition, + InstrumentationBase, + InstrumentationNodeModuleDefinition, + InstrumentationNodeModuleFile, +} from '@opentelemetry/instrumentation'; +import type { GoogleGenAIClient, GoogleGenAIOptions } from '@sentry/core'; +import { getClient, instrumentGoogleGenAIClient, replaceExports, SDK_VERSION } from '@sentry/core'; + +const supportedVersions = ['>=0.10.0 <2']; + +/** + * Represents the patched shape of the Google GenAI module export.
+ */ +interface PatchedModuleExports { + [key: string]: unknown; + GoogleGenAI?: unknown; +} + +type GoogleGenAIInstrumentationOptions = GoogleGenAIOptions & InstrumentationConfig; + +/** + * Sentry Google GenAI instrumentation using OpenTelemetry. + */ +export class SentryGoogleGenAiInstrumentation extends InstrumentationBase { + public constructor(config: GoogleGenAIInstrumentationOptions = {}) { + super('@sentry/instrumentation-google-genai', SDK_VERSION, config); + } + + /** + * Initializes the instrumentation by defining the modules to be patched. + */ + public init(): InstrumentationModuleDefinition { + const module = new InstrumentationNodeModuleDefinition( + '@google/genai', + supportedVersions, + exports => this._patch(exports), + exports => exports, + // In CJS, @google/genai re-exports from (dist/node/index.cjs) file. + // Patching only the root module sometimes misses the real implementation or + // gets overwritten when that file is loaded. We add a file-level patch so that + // _patch runs again on the concrete implementation + [ + new InstrumentationNodeModuleFile( + '@google/genai/dist/node/index.cjs', + supportedVersions, + exports => this._patch(exports), + exports => exports, + ), + ], + ); + return module; + } + + /** + * Core patch logic applying instrumentation to the Google GenAI client constructor. + */ + private _patch(exports: PatchedModuleExports): PatchedModuleExports | void { + const Original = exports.GoogleGenAI; + const config = this.getConfig(); + + if (typeof Original !== 'function') { + return exports; + } + + const WrappedGoogleGenAI = function (this: unknown, ...args: unknown[]): GoogleGenAIClient { + const instance = Reflect.construct(Original, args); + const client = getClient(); + const defaultPii = Boolean(client?.getOptions().sendDefaultPii); + + const typedConfig = config as GoogleGenAIInstrumentationOptions; + // eslint-disable-next-line @typescript-eslint/no-unsafe-member-access + const recordInputs = typedConfig?.recordInputs ?? defaultPii; + // eslint-disable-next-line @typescript-eslint/no-unsafe-member-access + const recordOutputs = typedConfig?.recordOutputs ?? 
defaultPii; + + return instrumentGoogleGenAIClient(instance, { + recordInputs, + recordOutputs, + }); + }; + + // Preserve static and prototype chains + Object.setPrototypeOf(WrappedGoogleGenAI, Original); + Object.setPrototypeOf(WrappedGoogleGenAI.prototype, Original.prototype); + + for (const key of Object.getOwnPropertyNames(Original)) { + if (!['length', 'name', 'prototype'].includes(key)) { + const descriptor = Object.getOwnPropertyDescriptor(Original, key); + if (descriptor) { + Object.defineProperty(WrappedGoogleGenAI, key, descriptor); + } + } + } + + // Replace google genai exports with the wrapped constructor + replaceExports(exports, 'GoogleGenAI', WrappedGoogleGenAI); + + return exports; + } +} diff --git a/packages/node/src/integrations/tracing/index.ts b/packages/node/src/integrations/tracing/index.ts index 5341bfff3b78..e4dd84fc266e 100644 --- a/packages/node/src/integrations/tracing/index.ts +++ b/packages/node/src/integrations/tracing/index.ts @@ -7,6 +7,7 @@ import { expressIntegration, instrumentExpress } from './express'; import { fastifyIntegration, instrumentFastify, instrumentFastifyV3 } from './fastify'; import { firebaseIntegration, instrumentFirebase } from './firebase'; import { genericPoolIntegration, instrumentGenericPool } from './genericPool'; +import { googleGenAIIntegration, instrumentGoogleGenAI } from './google-genai'; import { graphqlIntegration, instrumentGraphql } from './graphql'; import { hapiIntegration, instrumentHapi } from './hapi'; import { instrumentKafka, kafkaIntegration } from './kafka'; @@ -52,6 +53,7 @@ export function getAutoPerformanceIntegrations(): Integration[] { postgresJsIntegration(), firebaseIntegration(), anthropicAIIntegration(), + googleGenAIIntegration(), ]; } @@ -87,5 +89,6 @@ export function getOpenTelemetryInstrumentationToPreload(): (((options?: any) => instrumentPostgresJs, instrumentFirebase, instrumentAnthropicAi, + instrumentGoogleGenAI, ]; } diff --git a/yarn.lock b/yarn.lock index ebdb2b198675..56ee0f3c9f37 100644 --- a/yarn.lock +++ b/yarn.lock @@ -4266,6 +4266,14 @@ resolved "https://registry.yarnpkg.com/@google-cloud/promisify/-/promisify-2.0.3.tgz#f934b5cdc939e3c7039ff62b9caaf59a9d89e3a8" integrity sha512-d4VSA86eL/AFTe5xtyZX+ePUjE8dIFu2T8zmdeNBSa5/kNgXPCx/o/wbFNHAGLJdGnk1vddRuMESD9HbOC8irw== +"@google/genai@^1.20.0": + version "1.20.0" + resolved "https://registry.npmjs.org/@google/genai/-/genai-1.20.0.tgz#b728bdb383fc58fbb1b92eff26e831ff598688c0" + integrity sha512-QdShxO9LX35jFogy3iKprQNqgKKveux4H2QjOnyIvyHRuGi6PHiz3fjNf8Y0VPY8o5V2fHqR2XqiSVoz7yZs0w== + dependencies: + google-auth-library "^9.14.2" + ws "^8.18.0" + "@graphql-tools/merge@8.3.1": version "8.3.1" resolved "https://registry.yarnpkg.com/@graphql-tools/merge/-/merge-8.3.1.tgz#06121942ad28982a14635dbc87b5d488a041d722" @@ -17459,6 +17467,17 @@ gaxios@^4.0.0: is-stream "^2.0.0" node-fetch "^2.3.0" +gaxios@^6.0.0, gaxios@^6.1.1: + version "6.7.1" + resolved "https://registry.npmjs.org/gaxios/-/gaxios-6.7.1.tgz#ebd9f7093ede3ba502685e73390248bb5b7f71fb" + integrity sha512-LDODD4TMYx7XXdpwxAVRAIAuB0bzv0s+ywFonY46k126qzQHT9ygyoa9tncmOiQmmDrik65UYsEkv3lbfqQ3yQ== + dependencies: + extend "^3.0.2" + https-proxy-agent "^7.0.1" + is-stream "^2.0.0" + node-fetch "^2.6.9" + uuid "^9.0.1" + gcp-metadata@^4.2.0: version "4.2.1" resolved "https://registry.yarnpkg.com/gcp-metadata/-/gcp-metadata-4.2.1.tgz#31849fbcf9025ef34c2297c32a89a1e7e9f2cd62" @@ -17467,6 +17486,15 @@ gcp-metadata@^4.2.0: gaxios "^4.0.0" json-bigint "^1.0.0" +gcp-metadata@^6.1.0: + version "6.1.1" 
+ resolved "https://registry.npmjs.org/gcp-metadata/-/gcp-metadata-6.1.1.tgz#f65aa69f546bc56e116061d137d3f5f90bdec494" + integrity sha512-a4tiq7E0/5fTjxPAaH4jpjkSv/uCaU2p5KC6HVGrvl0cDjA8iBZv4vv1gyzlmK0ZUKqwpOyQMKzZQe3lTit77A== + dependencies: + gaxios "^6.1.1" + google-logging-utils "^0.0.2" + json-bigint "^1.0.0" + generate-function@^2.3.1: version "2.3.1" resolved "https://registry.yarnpkg.com/generate-function/-/generate-function-2.3.1.tgz#f069617690c10c868e73b8465746764f97c3479f" @@ -17978,6 +18006,23 @@ google-auth-library@^7.0.2: jws "^4.0.0" lru-cache "^6.0.0" +google-auth-library@^9.14.2: + version "9.15.1" + resolved "https://registry.npmjs.org/google-auth-library/-/google-auth-library-9.15.1.tgz#0c5d84ed1890b2375f1cd74f03ac7b806b392928" + integrity sha512-Jb6Z0+nvECVz+2lzSMt9u98UsoakXxA2HGHMCxh+so3n90XgYWkq5dur19JAJV7ONiJY22yBTyJB1TSkvPq9Ng== + dependencies: + base64-js "^1.3.0" + ecdsa-sig-formatter "^1.0.11" + gaxios "^6.1.1" + gcp-metadata "^6.1.0" + gtoken "^7.0.0" + jws "^4.0.0" + +google-logging-utils@^0.0.2: + version "0.0.2" + resolved "https://registry.npmjs.org/google-logging-utils/-/google-logging-utils-0.0.2.tgz#5fd837e06fa334da450433b9e3e1870c1594466a" + integrity sha512-NEgUnEcBiP5HrPzufUkBzJOD/Sxsco3rLNo1F1TNf7ieU8ryUzBhqba8r756CjLX7rn3fHl6iLEwPYuqpoKgQQ== + google-p12-pem@^3.0.3: version "3.1.4" resolved "https://registry.yarnpkg.com/google-p12-pem/-/google-p12-pem-3.1.4.tgz#123f7b40da204de4ed1fbf2fd5be12c047fc8b3b" @@ -18053,6 +18098,14 @@ gtoken@^5.0.4: google-p12-pem "^3.0.3" jws "^4.0.0" +gtoken@^7.0.0: + version "7.1.0" + resolved "https://registry.npmjs.org/gtoken/-/gtoken-7.1.0.tgz#d61b4ebd10132222817f7222b1e6064bd463fc26" + integrity sha512-pCcEwRi+TKpMlxAQObHDQ56KawURgyAf6jtIY046fJ5tIv3zDe/LEIubckAO8fj6JnAxLdmWkUfNyulQ2iKdEw== + dependencies: + gaxios "^6.0.0" + jws "^4.0.0" + gud@^1.0.0: version "1.0.0" resolved "https://registry.yarnpkg.com/gud/-/gud-1.0.0.tgz#a489581b17e6a70beca9abe3ae57de7a499852c0" @@ -18785,7 +18838,7 @@ https-proxy-agent@5.0.1, https-proxy-agent@^5.0.0, https-proxy-agent@^5.0.1: agent-base "6" debug "4" -https-proxy-agent@^7.0.0, https-proxy-agent@^7.0.5: +https-proxy-agent@^7.0.0, https-proxy-agent@^7.0.1, https-proxy-agent@^7.0.5: version "7.0.6" resolved "https://registry.yarnpkg.com/https-proxy-agent/-/https-proxy-agent-7.0.6.tgz#da8dfeac7da130b05c2ba4b59c9b6cd66611a6b9" integrity sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw== @@ -23051,7 +23104,7 @@ node-fetch@^1.0.1: encoding "^0.1.11" is-stream "^1.0.1" -node-fetch@^2.3.0, node-fetch@^2.6.1, node-fetch@^2.6.7: +node-fetch@^2.3.0, node-fetch@^2.6.1, node-fetch@^2.6.7, node-fetch@^2.6.9: version "2.7.0" resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.7.0.tgz#d0f0fa6e3e2dc1d27efcd8ad99d550bda94d187d" integrity sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A== @@ -28483,7 +28536,7 @@ string-template@~0.2.1: string-width@4.2.3, "string-width@^1.0.2 || 2 || 3 || 4", string-width@^4.0.0, string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.2, string-width@^4.2.3: version "4.2.3" - resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" + resolved "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== dependencies: emoji-regex "^8.0.0" @@ -28593,7 
+28646,7 @@ stringify-object@^3.2.1: strip-ansi@6.0.1, strip-ansi@^6.0.0, strip-ansi@^6.0.1: version "6.0.1" - resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" + resolved "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== dependencies: ansi-regex "^5.0.1" @@ -28770,7 +28823,6 @@ stylus@0.59.0, stylus@^0.59.0: sucrase@^3.27.0, sucrase@^3.35.0, sucrase@getsentry/sucrase#es2020-polyfills: version "3.36.0" - uid fd682f6129e507c00bb4e6319cc5d6b767e36061 resolved "https://codeload.github.com/getsentry/sucrase/tar.gz/fd682f6129e507c00bb4e6319cc5d6b767e36061" dependencies: "@jridgewell/gen-mapping" "^0.3.2" @@ -31658,7 +31710,7 @@ wrangler@4.22.0: wrap-ansi@7.0.0, wrap-ansi@^7.0.0: version "7.0.0" - resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" + resolved "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== dependencies: ansi-styles "^4.0.0"
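Appendix (not part of the diff): a minimal end-to-end sketch of what this change enables once released. The DSN, model names and prompts are placeholders, and the exact ESM/CJS loader setup is omitted.

```typescript
import * as Sentry from '@sentry/node';

Sentry.init({
  dsn: '__PUBLIC_DSN__',
  tracesSampleRate: 1.0,
  // recordInputs / recordOutputs default to the value of sendDefaultPii
  integrations: [Sentry.googleGenAIIntegration({ recordInputs: true, recordOutputs: true })],
});

// Import the SDK after Sentry.init so the GoogleGenAI constructor gets patched
const { GoogleGenAI } = await import('@google/genai');
const client = new GoogleGenAI({ apiKey: process.env.GOOGLE_GENAI_API_KEY });

// chats.create + sendMessage produce gen_ai.chat spans, the second with token usage
const chat = client.chats.create({ model: 'gemini-1.5-pro' });
await chat.sendMessage({ message: 'Hello' });

// models.generateContent produces a gen_ai.models span with usageMetadata-based token counts
await client.models.generateContent({
  model: 'gemini-1.5-flash',
  contents: [{ role: 'user', parts: [{ text: 'What is the capital of France?' }] }],
});
```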