Skip to content

Commit 97ff2f7

Browse files
authored
feat(core,node): Add instrumentation for GoogleGenerativeAI (#17625)
This PR implements instrumentation for the Google GenerativeAI SDK, adding automatic tracing for both direct model calls (`models.generateContent`) and conversational chat sessions (`chats.create` + `sendMessage`). The implementation follows Sentry's AI Agents Manual Instrumentation conventions and includes integration tests.

Usage:

```js
import * as Sentry from '@sentry/node';

Sentry.init({
  integrations: [
    Sentry.googleGenAIIntegration({
      recordInputs: true, // Record prompts/messages
      recordOutputs: true, // Record AI responses
    }),
  ],
});

// Google GenAI calls are now automatically instrumented
```
1 parent 3c76c5d commit 97ff2f7

File tree

22 files changed

+1198
-6
lines changed

22 files changed

+1198
-6
lines changed

dev-packages/node-integration-tests/package.json

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -24,6 +24,7 @@
2424
},
2525
"dependencies": {
2626
"@aws-sdk/client-s3": "^3.552.0",
27+
"@google/genai": "^1.20.0",
2728
"@hapi/hapi": "^21.3.10",
2829
"@nestjs/common": "11.1.3",
2930
"@nestjs/core": "11.1.3",
Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,23 @@
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

// Instrumentation file: sendDefaultPii is OFF, but the integration's own
// recordInputs/recordOutputs options are enabled, so prompts and responses
// are still captured. The companion test asserts exactly that.
Sentry.init({
  dsn: 'https://[email protected]/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: false,
  transport: loggingTransport,
  integrations: [
    Sentry.googleGenAIIntegration({
      recordInputs: true, // record prompts/messages despite sendDefaultPii: false
      recordOutputs: true, // record AI responses despite sendDefaultPii: false
    }),
  ],
  beforeSendTransaction: event => {
    // Filter out transactions created for the mock express server.
    // `transaction` is optional on events, so guard with optional chaining
    // instead of crashing inside beforeSendTransaction.
    if (event.transaction?.includes('/v1beta/')) {
      return null;
    }
    return event;
  },
});
Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

// Instrumentation file: sendDefaultPii is ON, so the Google GenAI integration
// (added automatically by @sentry/node) records prompts and responses by default.
Sentry.init({
  dsn: 'https://[email protected]/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: true,
  transport: loggingTransport,
  beforeSendTransaction: event => {
    // Filter out transactions created for the mock express server.
    // `transaction` is optional on events, so guard with optional chaining
    // instead of crashing inside beforeSendTransaction.
    if (event.transaction?.includes('/v1beta/')) {
      return null;
    }
    return event;
  },
});
Lines changed: 17 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,17 @@
import * as Sentry from '@sentry/node';
import { loggingTransport } from '@sentry-internal/node-integration-tests';

// Instrumentation file: sendDefaultPii is OFF and no explicit integration
// options are set, so no prompts/responses should be recorded on spans.
Sentry.init({
  dsn: 'https://[email protected]/1337',
  release: '1.0',
  tracesSampleRate: 1.0,
  sendDefaultPii: false,
  transport: loggingTransport,
  beforeSendTransaction: event => {
    // Filter out transactions created for the mock express server.
    // `transaction` is optional on events, so guard with optional chaining;
    // '/v1beta/' (with trailing slash) keeps the filter consistent with the
    // sibling instrument files.
    if (event.transaction?.includes('/v1beta/')) {
      return null;
    }
    return event;
  },
});
Lines changed: 109 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,109 @@
import { GoogleGenAI } from '@google/genai';
import * as Sentry from '@sentry/node';
import express from 'express';

const PORT = 3333;

/**
 * Starts a local express server that mimics the Google GenAI REST API so the
 * scenario runs without network access or real credentials.
 *
 * `error-model` responds 404 to exercise the integration's error path; every
 * other model returns a fixed successful completion with token usage metadata
 * (8 prompt / 12 candidate / 20 total) that the test asserts against.
 */
function startMockGoogleGenAIServer() {
  const app = express();
  app.use(express.json());

  // The GenAI SDK posts to /v1beta/models/<model>:generateContent; the escaped
  // colon matches that literal separator after the :model route parameter.
  app.post('/v1beta/models/:model\\:generateContent', (req, res) => {
    const model = req.params.model;

    if (model === 'error-model') {
      res.status(404).set('x-request-id', 'mock-request-123').end('Model not found');
      return;
    }

    res.send({
      candidates: [
        {
          content: {
            parts: [
              {
                text: 'Mock response from Google GenAI!',
              },
            ],
            role: 'model',
          },
          finishReason: 'stop',
          index: 0,
        },
      ],
      usageMetadata: {
        promptTokenCount: 8,
        candidatesTokenCount: 12,
        totalTokenCount: 20,
      },
    });
  });

  return app.listen(PORT);
}

async function run() {
  const server = startMockGoogleGenAIServer();

  try {
    await Sentry.startSpan({ op: 'function', name: 'main' }, async () => {
      const client = new GoogleGenAI({
        apiKey: 'mock-api-key',
        httpOptions: { baseUrl: `http://localhost:${PORT}` },
      });

      // Test 1: chats.create and sendMessage flow
      const chat = client.chats.create({
        model: 'gemini-1.5-pro',
        config: {
          temperature: 0.8,
          topP: 0.9,
          maxOutputTokens: 150,
        },
        history: [
          {
            role: 'user',
            parts: [{ text: 'Hello, how are you?' }],
          },
        ],
      });

      await chat.sendMessage({
        message: 'Tell me a joke',
      });

      // Test 2: models.generateContent
      await client.models.generateContent({
        model: 'gemini-1.5-flash',
        config: {
          temperature: 0.7,
          topP: 0.9,
          maxOutputTokens: 100,
        },
        contents: [
          {
            role: 'user',
            parts: [{ text: 'What is the capital of France?' }],
          },
        ],
      });

      // Test 3: Error handling
      try {
        await client.models.generateContent({
          model: 'error-model',
          contents: [
            {
              role: 'user',
              parts: [{ text: 'This will fail' }],
            },
          ],
        });
      } catch {
        // Expected error — the resulting span status is asserted in the test.
      }
    });
  } finally {
    // Close the mock server even if a scenario step throws unexpectedly, so
    // the child process can exit and the test runner does not hang.
    server.close();
  }
}

run();
Lines changed: 205 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,205 @@
import { afterAll, describe, expect } from 'vitest';
import { cleanupChildProcesses, createEsmAndCjsTests } from '../../../utils/runner';

describe('Google GenAI integration', () => {
  afterAll(() => {
    cleanupChildProcesses();
  });

  // Expected spans with sendDefaultPii: false — prompts/responses must NOT be
  // recorded, so `data` is matched as an exact object (no extra keys allowed).
  const EXPECTED_TRANSACTION_DEFAULT_PII_FALSE = {
    transaction: 'main',
    spans: expect.arrayContaining([
      // First span - chats.create
      expect.objectContaining({
        data: {
          'gen_ai.operation.name': 'chat',
          'sentry.op': 'gen_ai.chat',
          'sentry.origin': 'auto.ai.google_genai',
          'gen_ai.system': 'google_genai',
          'gen_ai.request.model': 'gemini-1.5-pro',
          'gen_ai.request.temperature': 0.8,
          'gen_ai.request.top_p': 0.9,
          'gen_ai.request.max_tokens': 150,
        },
        description: 'chat gemini-1.5-pro create',
        op: 'gen_ai.chat',
        origin: 'auto.ai.google_genai',
        status: 'ok',
      }),
      // Second span - chat.sendMessage (should get model from context)
      expect.objectContaining({
        data: {
          'gen_ai.operation.name': 'chat',
          'sentry.op': 'gen_ai.chat',
          'sentry.origin': 'auto.ai.google_genai',
          'gen_ai.system': 'google_genai',
          'gen_ai.request.model': 'gemini-1.5-pro', // Should get from chat context
          'gen_ai.usage.input_tokens': 8,
          'gen_ai.usage.output_tokens': 12,
          'gen_ai.usage.total_tokens': 20,
        },
        description: 'chat gemini-1.5-pro',
        op: 'gen_ai.chat',
        origin: 'auto.ai.google_genai',
        status: 'ok',
      }),
      // Third span - models.generateContent
      expect.objectContaining({
        data: {
          'gen_ai.operation.name': 'models',
          'sentry.op': 'gen_ai.models',
          'sentry.origin': 'auto.ai.google_genai',
          'gen_ai.system': 'google_genai',
          'gen_ai.request.model': 'gemini-1.5-flash',
          'gen_ai.request.temperature': 0.7,
          'gen_ai.request.top_p': 0.9,
          'gen_ai.request.max_tokens': 100,
          'gen_ai.usage.input_tokens': 8,
          'gen_ai.usage.output_tokens': 12,
          'gen_ai.usage.total_tokens': 20,
        },
        description: 'models gemini-1.5-flash',
        op: 'gen_ai.models',
        origin: 'auto.ai.google_genai',
        status: 'ok',
      }),
      // Fourth span - error handling (mock server returns 404 for error-model)
      expect.objectContaining({
        data: {
          'gen_ai.operation.name': 'models',
          'sentry.op': 'gen_ai.models',
          'sentry.origin': 'auto.ai.google_genai',
          'gen_ai.system': 'google_genai',
          'gen_ai.request.model': 'error-model',
        },
        description: 'models error-model',
        op: 'gen_ai.models',
        origin: 'auto.ai.google_genai',
        status: 'unknown_error',
      }),
    ]),
  };

  // Expected spans with sendDefaultPii: true — prompts/responses ARE recorded,
  // so `data` uses objectContaining and additionally asserts the message and
  // response attributes.
  const EXPECTED_TRANSACTION_DEFAULT_PII_TRUE = {
    transaction: 'main',
    spans: expect.arrayContaining([
      // First span - chats.create with PII
      expect.objectContaining({
        data: expect.objectContaining({
          'gen_ai.operation.name': 'chat',
          'sentry.op': 'gen_ai.chat',
          'sentry.origin': 'auto.ai.google_genai',
          'gen_ai.system': 'google_genai',
          'gen_ai.request.model': 'gemini-1.5-pro',
          'gen_ai.request.temperature': 0.8,
          'gen_ai.request.top_p': 0.9,
          'gen_ai.request.max_tokens': 150,
          'gen_ai.request.messages': expect.any(String), // Should include history when recordInputs: true
        }),
        description: 'chat gemini-1.5-pro create',
        op: 'gen_ai.chat',
        origin: 'auto.ai.google_genai',
        status: 'ok',
      }),
      // Second span - chat.sendMessage with PII
      expect.objectContaining({
        data: expect.objectContaining({
          'gen_ai.operation.name': 'chat',
          'sentry.op': 'gen_ai.chat',
          'sentry.origin': 'auto.ai.google_genai',
          'gen_ai.system': 'google_genai',
          'gen_ai.request.model': 'gemini-1.5-pro',
          'gen_ai.request.messages': expect.any(String), // Should include message when recordInputs: true
          'gen_ai.response.text': expect.any(String), // Should include response when recordOutputs: true
          'gen_ai.usage.input_tokens': 8,
          'gen_ai.usage.output_tokens': 12,
          'gen_ai.usage.total_tokens': 20,
        }),
        description: 'chat gemini-1.5-pro',
        op: 'gen_ai.chat',
        origin: 'auto.ai.google_genai',
        status: 'ok',
      }),
      // Third span - models.generateContent with PII
      expect.objectContaining({
        data: expect.objectContaining({
          'gen_ai.operation.name': 'models',
          'sentry.op': 'gen_ai.models',
          'sentry.origin': 'auto.ai.google_genai',
          'gen_ai.system': 'google_genai',
          'gen_ai.request.model': 'gemini-1.5-flash',
          'gen_ai.request.temperature': 0.7,
          'gen_ai.request.top_p': 0.9,
          'gen_ai.request.max_tokens': 100,
          'gen_ai.request.messages': expect.any(String), // Should include contents when recordInputs: true
          'gen_ai.response.text': expect.any(String), // Should include response when recordOutputs: true
          'gen_ai.usage.input_tokens': 8,
          'gen_ai.usage.output_tokens': 12,
          'gen_ai.usage.total_tokens': 20,
        }),
        description: 'models gemini-1.5-flash',
        op: 'gen_ai.models',
        origin: 'auto.ai.google_genai',
        status: 'ok',
      }),
      // Fourth span - error handling with PII
      expect.objectContaining({
        data: expect.objectContaining({
          'gen_ai.operation.name': 'models',
          'sentry.op': 'gen_ai.models',
          'sentry.origin': 'auto.ai.google_genai',
          'gen_ai.system': 'google_genai',
          'gen_ai.request.model': 'error-model',
          'gen_ai.request.messages': expect.any(String), // Should include contents when recordInputs: true
        }),
        description: 'models error-model',
        op: 'gen_ai.models',
        origin: 'auto.ai.google_genai',
        status: 'unknown_error',
      }),
    ]),
  };

  // Expected spans when the integration's recordInputs/recordOutputs options
  // are set explicitly even though sendDefaultPii is false.
  const EXPECTED_TRANSACTION_WITH_OPTIONS = {
    transaction: 'main',
    spans: expect.arrayContaining([
      // Check that custom options are respected
      expect.objectContaining({
        data: expect.objectContaining({
          'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true
          'gen_ai.response.text': expect.any(String), // Should include response text when recordOutputs: true
        }),
      }),
    ]),
  };

  createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument.mjs', (createRunner, test) => {
    test('creates google genai related spans with sendDefaultPii: false', async () => {
      await createRunner()
        .ignore('event')
        .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_FALSE })
        .start()
        .completed();
    });
  });

  createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument-with-pii.mjs', (createRunner, test) => {
    test('creates google genai related spans with sendDefaultPii: true', async () => {
      await createRunner()
        .ignore('event')
        .expect({ transaction: EXPECTED_TRANSACTION_DEFAULT_PII_TRUE })
        .start()
        .completed();
    });
  });

  createEsmAndCjsTests(__dirname, 'scenario.mjs', 'instrument-with-options.mjs', (createRunner, test) => {
    test('creates google genai related spans with custom options', async () => {
      await createRunner()
        .ignore('event')
        .expect({ transaction: EXPECTED_TRANSACTION_WITH_OPTIONS })
        .start()
        .completed();
    });
  });
});

packages/astro/src/index.server.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,7 @@ export {
1515
anthropicAIIntegration,
1616
// eslint-disable-next-line deprecation/deprecation
1717
anrIntegration,
18+
googleGenAIIntegration,
1819
// eslint-disable-next-line deprecation/deprecation
1920
disableAnrDetectionForCallback,
2021
captureCheckIn,

packages/aws-serverless/src/index.ts

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -125,6 +125,7 @@ export {
125125
profiler,
126126
amqplibIntegration,
127127
anthropicAIIntegration,
128+
googleGenAIIntegration,
128129
vercelAIIntegration,
129130
logger,
130131
consoleLoggingIntegration,

0 commit comments

Comments
 (0)