Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 21 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,27 @@

- "You miss 100 percent of the chances you don't take. — Wayne Gretzky" — Michael Scott

## 10.14.0

### Important Changes

- **feat(cloudflare,vercel-edge): Add support for Google Gen AI instrumentation ([#17723](https://github.com/getsentry/sentry-javascript/pull/17723))**

The SDK now automatically instruments Google's Generative AI operations in Cloudflare Workers and Vercel Edge Runtime environments, providing insights into your AI operations.

### Other Changes

- fix(nextjs): Display updated turbopack warnings ([#17737](https://github.com/getsentry/sentry-javascript/pull/17737))
- ref(core): Wrap isolationscope in `WeakRef` when storing it on spans ([#17712](https://github.com/getsentry/sentry-javascript/pull/17712))

<details>
<summary> <strong>Internal Changes</strong> </summary>

- test(node): Avoid using specific port for node-integration-tests ([#17729](https://github.com/getsentry/sentry-javascript/pull/17729))
- test(nuxt): Update Nuxt version and add Nitro $fetch test ([#17713](https://github.com/getsentry/sentry-javascript/pull/17713))

</details>

## 10.13.0

### Important Changes
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
import * as Sentry from '@sentry/cloudflare';
import type { GoogleGenAIClient } from '@sentry/core';
import { MockGoogleGenAI } from './mocks';

// Bindings injected by the Cloudflare Workers runtime for this test worker.
interface Env {
  SENTRY_DSN: string;
}

// Mock Google GenAI client (see ./mocks) — the test needs the instrumented
// call surface, not real network traffic.
const mockClient = new MockGoogleGenAI({
  apiKey: 'mock-api-key',
});

// Wrap the mock so Sentry emits gen_ai.* spans around its method calls.
const client: GoogleGenAIClient = Sentry.instrumentGoogleGenAIClient(mockClient);

export default Sentry.withSentry(
  (env: Env) => ({
    dsn: env.SENTRY_DSN,
    // Sample every transaction so the integration test always receives spans.
    tracesSampleRate: 1.0,
  }),
  {
    async fetch(_request, _env, _ctx) {
      // Test 1: chats.create and sendMessage flow
      const chat = client.chats.create({
        model: 'gemini-1.5-pro',
        config: {
          temperature: 0.8,
          topP: 0.9,
          maxOutputTokens: 150,
        },
        history: [
          {
            role: 'user',
            parts: [{ text: 'Hello, how are you?' }],
          },
        ],
      });

      const chatResponse = await chat.sendMessage({
        message: 'Tell me a joke',
      });

      // Test 2: models.generateContent
      const modelResponse = await client.models.generateContent({
        model: 'gemini-1.5-flash',
        config: {
          temperature: 0.7,
          topP: 0.9,
          maxOutputTokens: 100,
        },
        contents: [
          {
            role: 'user',
            parts: [{ text: 'What is the capital of France?' }],
          },
        ],
      });

      // Echo both responses so the test can also inspect the HTTP body.
      return new Response(JSON.stringify({ chatResponse, modelResponse }));
    },
  },
);
Original file line number Diff line number Diff line change
@@ -0,0 +1,128 @@
import type { GoogleGenAIChat, GoogleGenAIClient, GoogleGenAIResponse } from '@sentry/core';

/**
 * Minimal stand-in for the `@google/genai` client used by the Cloudflare
 * integration tests. It mirrors only the surface the Sentry instrumentation
 * wraps (`models.generateContent`, `models.generateContentStream`,
 * `chats.create` → `sendMessage` / `sendMessageStream`) and returns canned
 * responses with fixed token counts so span assertions stay deterministic.
 */
export class MockGoogleGenAI implements GoogleGenAIClient {
  public models: {
    generateContent: (...args: unknown[]) => Promise<GoogleGenAIResponse>;
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    generateContentStream: (...args: unknown[]) => Promise<AsyncGenerator<GoogleGenAIResponse, any, unknown>>;
  };
  public chats: {
    create: (...args: unknown[]) => GoogleGenAIChat;
  };
  public apiKey: string;

  public constructor(config: { apiKey: string }) {
    this.apiKey = config.apiKey;

    // models.generateContent / generateContentStream functionality
    this.models = {
      generateContent: async (...callArgs: unknown[]) => {
        const request = callArgs[0] as { model: string; contents?: unknown };
        // Small artificial delay so spans get a non-zero duration.
        await new Promise(done => setTimeout(done, 10));

        // Sentinel model name used by error-path tests.
        if (request.model === 'error-model') {
          const error = new Error('Model not found');
          (error as unknown as { status: number }).status = 404;
          (error as unknown as { headers: Record<string, string> }).headers = { 'x-request-id': 'mock-request-123' };
          throw error;
        }

        return this._cannedResponse('Hello from Google GenAI mock!');
      },
      generateContentStream: async () =>
        // Resolves to an async generator, matching the real SDK's shape.
        (async function* (): AsyncGenerator<GoogleGenAIResponse, any, unknown> {
          yield {
            candidates: [
              {
                content: {
                  parts: [{ text: 'Streaming response' }],
                  role: 'model',
                },
                finishReason: 'stop',
                index: 0,
              },
            ],
          };
        })(),
    };

    // chats.create implementation
    this.chats = {
      create: (...createArgs: unknown[]) => {
        const request = createArgs[0] as { model: string; config?: Record<string, unknown> };
        const modelName = request.model;

        return {
          modelVersion: modelName,
          sendMessage: async (..._ignoredArgs: unknown[]) => {
            // Small artificial delay so spans get a non-zero duration.
            await new Promise(done => setTimeout(done, 10));

            return {
              ...this._cannedResponse('This is a joke from the chat!'),
              modelVersion: modelName, // Include model version in response
            };
          },
          sendMessageStream: async () =>
            // Resolves to an async generator, matching the real SDK's shape.
            (async function* (): AsyncGenerator<GoogleGenAIResponse, any, unknown> {
              yield {
                candidates: [
                  {
                    content: {
                      parts: [{ text: 'Streaming chat response' }],
                      role: 'model',
                    },
                    finishReason: 'stop',
                    index: 0,
                  },
                ],
              };
            })(),
        };
      },
    };
  }

  /**
   * Builds the canned non-streaming response shared by `generateContent`
   * and `sendMessage`, with the fixed usage metadata the tests assert on.
   */
  private _cannedResponse(text: string): GoogleGenAIResponse {
    return {
      candidates: [
        {
          content: {
            parts: [{ text }],
            role: 'model',
          },
          finishReason: 'stop',
          index: 0,
        },
      ],
      usageMetadata: {
        promptTokenCount: 8,
        candidatesTokenCount: 12,
        totalTokenCount: 20,
      },
    };
  }
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,75 @@
import { expect, it } from 'vitest';
import { createRunner } from '../../../runner';

// These tests are not exhaustive because the instrumentation is
// already tested in the node integration tests and we merely
// want to test that the instrumentation does not break in our
// cloudflare SDK.

it('traces Google GenAI chat creation and message sending', async () => {
  const runner = createRunner(__dirname)
    .ignore('event')
    .expect(envelope => {
      // Narrow the envelope item to just the fields this test reads instead of
      // `any`, so property-name typos are still caught by the compiler.
      const transactionEvent = envelope[1]?.[0]?.[1] as {
        transaction?: string;
        spans?: unknown[];
      };

      expect(transactionEvent.transaction).toBe('GET /');
      expect(transactionEvent.spans).toEqual(
        expect.arrayContaining([
          // First span - chats.create
          expect.objectContaining({
            data: expect.objectContaining({
              'gen_ai.operation.name': 'chat',
              'sentry.op': 'gen_ai.chat',
              'sentry.origin': 'auto.ai.google_genai',
              'gen_ai.system': 'google_genai',
              'gen_ai.request.model': 'gemini-1.5-pro',
              'gen_ai.request.temperature': 0.8,
              'gen_ai.request.top_p': 0.9,
              'gen_ai.request.max_tokens': 150,
            }),
            description: 'chat gemini-1.5-pro create',
            op: 'gen_ai.chat',
            origin: 'auto.ai.google_genai',
          }),
          // Second span - chat.sendMessage
          expect.objectContaining({
            data: expect.objectContaining({
              'gen_ai.operation.name': 'chat',
              'sentry.op': 'gen_ai.chat',
              'sentry.origin': 'auto.ai.google_genai',
              'gen_ai.system': 'google_genai',
              'gen_ai.request.model': 'gemini-1.5-pro',
              // Fixed token counts come from the mock client's canned response.
              'gen_ai.usage.input_tokens': 8,
              'gen_ai.usage.output_tokens': 12,
              'gen_ai.usage.total_tokens': 20,
            }),
            description: 'chat gemini-1.5-pro',
            op: 'gen_ai.chat',
            origin: 'auto.ai.google_genai',
          }),
          // Third span - models.generateContent
          expect.objectContaining({
            data: expect.objectContaining({
              'gen_ai.operation.name': 'models',
              'sentry.op': 'gen_ai.models',
              'sentry.origin': 'auto.ai.google_genai',
              'gen_ai.system': 'google_genai',
              'gen_ai.request.model': 'gemini-1.5-flash',
              'gen_ai.request.temperature': 0.7,
              'gen_ai.request.top_p': 0.9,
              'gen_ai.request.max_tokens': 100,
              'gen_ai.usage.input_tokens': 8,
              'gen_ai.usage.output_tokens': 12,
              'gen_ai.usage.total_tokens': 20,
            }),
            description: 'models gemini-1.5-flash',
            op: 'gen_ai.models',
            origin: 'auto.ai.google_genai',
          }),
        ]),
      );
    })
    .start();
  await runner.makeRequest('get', '/');
  await runner.completed();
});
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
{
  "name": "worker-name",
  "compatibility_date": "2025-06-17",
  "main": "index.ts",
  "compatibility_flags": ["nodejs_compat"]
}
20 changes: 0 additions & 20 deletions dev-packages/e2e-tests/test-applications/nuxt-4/app/app.vue

This file was deleted.

Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
<template>
<div>
<button @click="fetchError">Fetch Server API Error</button>
<button @click="fetchNitroFetch">Fetch Nitro $fetch</button>
</div>
</template>

Expand All @@ -10,4 +11,8 @@ import { useFetch } from '#imports';
const fetchError = async () => {
await useFetch('/api/server-error');
};

const fetchNitroFetch = async () => {
await useFetch('/api/nitro-fetch');
};
</script>
Original file line number Diff line number Diff line change
@@ -1,3 +1,20 @@
<!-- Root component of the Nuxt 4 E2E test app: renders the shared navigation
     and the routed pages inside the default layout. -->
<template>
  <h1>Hello!</h1>
  <NuxtLayout>
    <header>
      <nav>
        <ul>
          <li><NuxtLink to="/fetch-server-routes">Fetch Server Routes</NuxtLink></li>
          <li><NuxtLink to="/test-param/1234">Fetch Param</NuxtLink></li>
          <li><NuxtLink to="/client-error">Client Error</NuxtLink></li>
        </ul>
      </nav>
    </header>
    <NuxtPage />
  </NuxtLayout>
</template>

<script setup lang="ts">
import { useSentryTestTag } from '#imports';

// Tags every Sentry event from this app so E2E assertions can filter reliably.
useSentryTestTag();
</script>
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@
"dependencies": {
"@pinia/nuxt": "^0.5.5",
"@sentry/nuxt": "latest || *",
"nuxt": "^4.0.0-alpha.4"
"nuxt": "^4.1.2"
},
"devDependencies": {
"@playwright/test": "~1.53.2",
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,5 @@
import { defineEventHandler } from '#imports';

/**
 * Server route that issues an outgoing Nitro `$fetch` request, letting the E2E
 * suite verify that Sentry instruments `$fetch` calls made from the server.
 */
export default defineEventHandler(() => $fetch('https://example.com'));
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ test.describe('server-side errors', async () => {
return errorEvent?.exception?.values?.[0]?.value === 'Nuxt 4 Server error';
});

await page.goto(`/fetch-server-error`);
await page.goto(`/fetch-server-routes`);
await page.getByText('Fetch Server API Error', { exact: true }).click();

const error = await errorPromise;
Expand Down
Loading
Loading