Skip to content

Commit 3e34bd6

Browse files
authored
Merge pull request #17738 from getsentry/prepare-release/10.14.0
meta(changelog): Update changelog for 10.14.0
2 parents 2dfe295 + dfd421b commit 3e34bd6

File tree

25 files changed

+937
-209
lines changed

25 files changed

+937
-209
lines changed

CHANGELOG.md

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
- "You miss 100 percent of the chances you don't take. — Wayne Gretzky" — Michael Scott

## 10.14.0

### Important Changes

- **feat(cloudflare,vercel-edge): Add support for Google Gen AI instrumentation ([#17723](https://github.com/getsentry/sentry-javascript/pull/17723))**

  The SDK now automatically instruments Google's Generative AI operations in Cloudflare Workers and Vercel Edge Runtime environments, providing insights into your AI operations.

### Other Changes

- fix(nextjs): Display updated turbopack warnings ([#17737](https://github.com/getsentry/sentry-javascript/pull/17737))
- ref(core): Wrap isolationscope in `WeakRef` when storing it on spans ([#17712](https://github.com/getsentry/sentry-javascript/pull/17712))

<details>
<summary> <strong>Internal Changes</strong> </summary>

- test(node): Avoid using specific port for node-integration-tests ([#17729](https://github.com/getsentry/sentry-javascript/pull/17729))
- test(nuxt): Update Nuxt version and add Nitro $fetch test ([#17713](https://github.com/getsentry/sentry-javascript/pull/17713))

</details>

## 10.13.0

### Important Changes
Lines changed: 61 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,61 @@
1+
import * as Sentry from '@sentry/cloudflare';
2+
import type { GoogleGenAIClient } from '@sentry/core';
3+
import { MockGoogleGenAI } from './mocks';
4+
5+
interface Env {
6+
SENTRY_DSN: string;
7+
}
8+
9+
const mockClient = new MockGoogleGenAI({
10+
apiKey: 'mock-api-key',
11+
});
12+
13+
const client: GoogleGenAIClient = Sentry.instrumentGoogleGenAIClient(mockClient);
14+
15+
export default Sentry.withSentry(
16+
(env: Env) => ({
17+
dsn: env.SENTRY_DSN,
18+
tracesSampleRate: 1.0,
19+
}),
20+
{
21+
async fetch(_request, _env, _ctx) {
22+
// Test 1: chats.create and sendMessage flow
23+
const chat = client.chats.create({
24+
model: 'gemini-1.5-pro',
25+
config: {
26+
temperature: 0.8,
27+
topP: 0.9,
28+
maxOutputTokens: 150,
29+
},
30+
history: [
31+
{
32+
role: 'user',
33+
parts: [{ text: 'Hello, how are you?' }],
34+
},
35+
],
36+
});
37+
38+
const chatResponse = await chat.sendMessage({
39+
message: 'Tell me a joke',
40+
});
41+
42+
// Test 2: models.generateContent
43+
const modelResponse = await client.models.generateContent({
44+
model: 'gemini-1.5-flash',
45+
config: {
46+
temperature: 0.7,
47+
topP: 0.9,
48+
maxOutputTokens: 100,
49+
},
50+
contents: [
51+
{
52+
role: 'user',
53+
parts: [{ text: 'What is the capital of France?' }],
54+
},
55+
],
56+
});
57+
58+
return new Response(JSON.stringify({ chatResponse, modelResponse }));
59+
},
60+
},
61+
);
Lines changed: 128 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,128 @@
1+
import type { GoogleGenAIChat, GoogleGenAIClient, GoogleGenAIResponse } from '@sentry/core';
2+
3+
export class MockGoogleGenAI implements GoogleGenAIClient {
4+
public models: {
5+
generateContent: (...args: unknown[]) => Promise<GoogleGenAIResponse>;
6+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
7+
generateContentStream: (...args: unknown[]) => Promise<AsyncGenerator<GoogleGenAIResponse, any, unknown>>;
8+
};
9+
public chats: {
10+
create: (...args: unknown[]) => GoogleGenAIChat;
11+
};
12+
public apiKey: string;
13+
14+
public constructor(config: { apiKey: string }) {
15+
this.apiKey = config.apiKey;
16+
17+
// models.generateContent functionality
18+
this.models = {
19+
generateContent: async (...args: unknown[]) => {
20+
const params = args[0] as { model: string; contents?: unknown };
21+
// Simulate processing time
22+
await new Promise(resolve => setTimeout(resolve, 10));
23+
24+
if (params.model === 'error-model') {
25+
const error = new Error('Model not found');
26+
(error as unknown as { status: number }).status = 404;
27+
(error as unknown as { headers: Record<string, string> }).headers = { 'x-request-id': 'mock-request-123' };
28+
throw error;
29+
}
30+
31+
return {
32+
candidates: [
33+
{
34+
content: {
35+
parts: [
36+
{
37+
text: 'Hello from Google GenAI mock!',
38+
},
39+
],
40+
role: 'model',
41+
},
42+
finishReason: 'stop',
43+
index: 0,
44+
},
45+
],
46+
usageMetadata: {
47+
promptTokenCount: 8,
48+
candidatesTokenCount: 12,
49+
totalTokenCount: 20,
50+
},
51+
};
52+
},
53+
generateContentStream: async () => {
54+
// Return a promise that resolves to an async generator
55+
return (async function* (): AsyncGenerator<GoogleGenAIResponse, any, unknown> {
56+
yield {
57+
candidates: [
58+
{
59+
content: {
60+
parts: [{ text: 'Streaming response' }],
61+
role: 'model',
62+
},
63+
finishReason: 'stop',
64+
index: 0,
65+
},
66+
],
67+
};
68+
})();
69+
},
70+
};
71+
72+
// chats.create implementation
73+
this.chats = {
74+
create: (...args: unknown[]) => {
75+
const params = args[0] as { model: string; config?: Record<string, unknown> };
76+
const model = params.model;
77+
78+
return {
79+
modelVersion: model,
80+
sendMessage: async (..._messageArgs: unknown[]) => {
81+
// Simulate processing time
82+
await new Promise(resolve => setTimeout(resolve, 10));
83+
84+
return {
85+
candidates: [
86+
{
87+
content: {
88+
parts: [
89+
{
90+
text: 'This is a joke from the chat!',
91+
},
92+
],
93+
role: 'model',
94+
},
95+
finishReason: 'stop',
96+
index: 0,
97+
},
98+
],
99+
usageMetadata: {
100+
promptTokenCount: 8,
101+
candidatesTokenCount: 12,
102+
totalTokenCount: 20,
103+
},
104+
modelVersion: model, // Include model version in response
105+
};
106+
},
107+
sendMessageStream: async () => {
108+
// Return a promise that resolves to an async generator
109+
return (async function* (): AsyncGenerator<GoogleGenAIResponse, any, unknown> {
110+
yield {
111+
candidates: [
112+
{
113+
content: {
114+
parts: [{ text: 'Streaming chat response' }],
115+
role: 'model',
116+
},
117+
finishReason: 'stop',
118+
index: 0,
119+
},
120+
],
121+
};
122+
})();
123+
},
124+
};
125+
},
126+
};
127+
}
128+
}
Lines changed: 75 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,75 @@
1+
import { expect, it } from 'vitest';
2+
import { createRunner } from '../../../runner';
3+
4+
// These tests are not exhaustive because the instrumentation is
5+
// already tested in the node integration tests and we merely
6+
// want to test that the instrumentation does not break in our
7+
// cloudflare SDK.
8+
9+
it('traces Google GenAI chat creation and message sending', async () => {
10+
const runner = createRunner(__dirname)
11+
.ignore('event')
12+
.expect(envelope => {
13+
const transactionEvent = envelope[1]?.[0]?.[1] as any;
14+
15+
expect(transactionEvent.transaction).toBe('GET /');
16+
expect(transactionEvent.spans).toEqual(
17+
expect.arrayContaining([
18+
// First span - chats.create
19+
expect.objectContaining({
20+
data: expect.objectContaining({
21+
'gen_ai.operation.name': 'chat',
22+
'sentry.op': 'gen_ai.chat',
23+
'sentry.origin': 'auto.ai.google_genai',
24+
'gen_ai.system': 'google_genai',
25+
'gen_ai.request.model': 'gemini-1.5-pro',
26+
'gen_ai.request.temperature': 0.8,
27+
'gen_ai.request.top_p': 0.9,
28+
'gen_ai.request.max_tokens': 150,
29+
}),
30+
description: 'chat gemini-1.5-pro create',
31+
op: 'gen_ai.chat',
32+
origin: 'auto.ai.google_genai',
33+
}),
34+
// Second span - chat.sendMessage
35+
expect.objectContaining({
36+
data: expect.objectContaining({
37+
'gen_ai.operation.name': 'chat',
38+
'sentry.op': 'gen_ai.chat',
39+
'sentry.origin': 'auto.ai.google_genai',
40+
'gen_ai.system': 'google_genai',
41+
'gen_ai.request.model': 'gemini-1.5-pro',
42+
'gen_ai.usage.input_tokens': 8,
43+
'gen_ai.usage.output_tokens': 12,
44+
'gen_ai.usage.total_tokens': 20,
45+
}),
46+
description: 'chat gemini-1.5-pro',
47+
op: 'gen_ai.chat',
48+
origin: 'auto.ai.google_genai',
49+
}),
50+
// Third span - models.generateContent
51+
expect.objectContaining({
52+
data: expect.objectContaining({
53+
'gen_ai.operation.name': 'models',
54+
'sentry.op': 'gen_ai.models',
55+
'sentry.origin': 'auto.ai.google_genai',
56+
'gen_ai.system': 'google_genai',
57+
'gen_ai.request.model': 'gemini-1.5-flash',
58+
'gen_ai.request.temperature': 0.7,
59+
'gen_ai.request.top_p': 0.9,
60+
'gen_ai.request.max_tokens': 100,
61+
'gen_ai.usage.input_tokens': 8,
62+
'gen_ai.usage.output_tokens': 12,
63+
'gen_ai.usage.total_tokens': 20,
64+
}),
65+
description: 'models gemini-1.5-flash',
66+
op: 'gen_ai.models',
67+
origin: 'auto.ai.google_genai',
68+
}),
69+
]),
70+
);
71+
})
72+
.start();
73+
await runner.makeRequest('get', '/');
74+
await runner.completed();
75+
});
Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
{
  "name": "worker-name",
  "compatibility_date": "2025-06-17",
  "main": "index.ts",
  "compatibility_flags": ["nodejs_compat"]
}

dev-packages/e2e-tests/test-applications/nuxt-4/app/app.vue

Lines changed: 0 additions & 20 deletions
This file was deleted.

dev-packages/e2e-tests/test-applications/nuxt-4/app/pages/fetch-server-error.vue renamed to dev-packages/e2e-tests/test-applications/nuxt-4/app/pages/fetch-server-routes.vue

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
<template>
22
<div>
33
<button @click="fetchError">Fetch Server API Error</button>
4+
<button @click="fetchNitroFetch">Fetch Nitro $fetch</button>
45
</div>
56
</template>
67

@@ -10,4 +11,8 @@ import { useFetch } from '#imports';
1011
const fetchError = async () => {
1112
await useFetch('/api/server-error');
1213
};
14+
15+
const fetchNitroFetch = async () => {
16+
await useFetch('/api/nitro-fetch');
17+
};
1318
</script>
Lines changed: 18 additions & 1 deletion
Original file line numberDiff line numberDiff line change
<!-- Root app shell for the nuxt-4 e2e test application: a small nav linking
     to each test page, with routed pages rendered via <NuxtPage />. -->
<template>
  <NuxtLayout>
    <header>
      <nav>
        <ul>
          <li><NuxtLink to="/fetch-server-routes">Fetch Server Routes</NuxtLink></li>
          <li><NuxtLink to="/test-param/1234">Fetch Param</NuxtLink></li>
          <li><NuxtLink to="/client-error">Client Error</NuxtLink></li>
        </ul>
      </nav>
    </header>
    <NuxtPage />
  </NuxtLayout>
</template>

<script setup lang="ts">
import { useSentryTestTag } from '#imports';

// Applies the app-wide Sentry test tag (project composable) on setup.
useSentryTestTag();
</script>

dev-packages/e2e-tests/test-applications/nuxt-4/package.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,7 @@
1818
"dependencies": {
1919
"@pinia/nuxt": "^0.5.5",
2020
"@sentry/nuxt": "latest || *",
21-
"nuxt": "^4.0.0-alpha.4"
21+
"nuxt": "^4.1.2"
2222
},
2323
"devDependencies": {
2424
"@playwright/test": "~1.53.2",
Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
import { defineEventHandler } from '#imports';
2+
3+
export default defineEventHandler(async () => {
4+
return await $fetch('https://example.com');
5+
});

0 commit comments

Comments
 (0)