@@ -13,14 +13,14 @@ describe('Vercel AI integration', () => {
       // First span - no telemetry config, should enable telemetry but not record inputs/outputs when sendDefaultPii: false
       expect.objectContaining({
         data: {
-          'ai.model.id': 'mock-model-id',
-          'ai.model.provider': 'mock-provider',
-          'ai.operationId': 'ai.generateText',
-          'ai.pipeline.name': 'generateText',
-          'ai.response.finishReason': 'stop',
-          'ai.settings.maxRetries': 2,
-          'ai.settings.maxSteps': 1,
-          'ai.streaming': false,
+          'vercel.ai.model.id': 'mock-model-id',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.operationId': 'ai.generateText',
+          'vercel.ai.pipeline.name': 'generateText',
+          'vercel.ai.response.finishReason': 'stop',
+          'vercel.ai.settings.maxRetries': 2,
+          'vercel.ai.settings.maxSteps': 1,
+          'vercel.ai.streaming': false,
           'gen_ai.response.model': 'mock-model-id',
           'gen_ai.usage.input_tokens': 10,
           'gen_ai.usage.output_tokens': 20,
@@ -40,18 +40,18 @@ describe('Vercel AI integration', () => {
           'sentry.origin': 'auto.vercelai.otel',
           'sentry.op': 'gen_ai.generate_text',
           'operation.name': 'ai.generateText.doGenerate',
-          'ai.operationId': 'ai.generateText.doGenerate',
-          'ai.model.provider': 'mock-provider',
-          'ai.model.id': 'mock-model-id',
-          'ai.settings.maxRetries': 2,
+          'vercel.ai.operationId': 'ai.generateText.doGenerate',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.model.id': 'mock-model-id',
+          'vercel.ai.settings.maxRetries': 2,
           'gen_ai.system': 'mock-provider',
           'gen_ai.request.model': 'mock-model-id',
-          'ai.pipeline.name': 'generateText.doGenerate',
-          'ai.streaming': false,
-          'ai.response.finishReason': 'stop',
-          'ai.response.model': 'mock-model-id',
-          'ai.response.id': expect.any(String),
-          'ai.response.timestamp': expect.any(String),
+          'vercel.ai.pipeline.name': 'generateText.doGenerate',
+          'vercel.ai.streaming': false,
+          'vercel.ai.response.finishReason': 'stop',
+          'vercel.ai.response.model': 'mock-model-id',
+          'vercel.ai.response.id': expect.any(String),
+          'vercel.ai.response.timestamp': expect.any(String),
           'gen_ai.response.finish_reasons': ['stop'],
           'gen_ai.usage.input_tokens': 10,
           'gen_ai.usage.output_tokens': 20,
@@ -67,16 +67,16 @@ describe('Vercel AI integration', () => {
       // Third span - explicit telemetry enabled, should record inputs/outputs regardless of sendDefaultPii
       expect.objectContaining({
         data: {
-          'ai.model.id': 'mock-model-id',
-          'ai.model.provider': 'mock-provider',
-          'ai.operationId': 'ai.generateText',
-          'ai.pipeline.name': 'generateText',
-          'ai.prompt': '{"prompt":"Where is the second span?"}',
-          'ai.response.finishReason': 'stop',
+          'vercel.ai.model.id': 'mock-model-id',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.operationId': 'ai.generateText',
+          'vercel.ai.pipeline.name': 'generateText',
+          'vercel.ai.prompt': '{"prompt":"Where is the second span?"}',
+          'vercel.ai.response.finishReason': 'stop',
           'gen_ai.response.text': expect.any(String),
-          'ai.settings.maxRetries': 2,
-          'ai.settings.maxSteps': 1,
-          'ai.streaming': false,
+          'vercel.ai.settings.maxRetries': 2,
+          'vercel.ai.settings.maxSteps': 1,
+          'vercel.ai.streaming': false,
           'gen_ai.prompt': '{"prompt":"Where is the second span?"}',
           'gen_ai.response.model': 'mock-model-id',
           'gen_ai.usage.input_tokens': 10,
@@ -97,20 +97,20 @@ describe('Vercel AI integration', () => {
           'sentry.origin': 'auto.vercelai.otel',
           'sentry.op': 'gen_ai.generate_text',
           'operation.name': 'ai.generateText.doGenerate',
-          'ai.operationId': 'ai.generateText.doGenerate',
-          'ai.model.provider': 'mock-provider',
-          'ai.model.id': 'mock-model-id',
-          'ai.settings.maxRetries': 2,
+          'vercel.ai.operationId': 'ai.generateText.doGenerate',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.model.id': 'mock-model-id',
+          'vercel.ai.settings.maxRetries': 2,
           'gen_ai.system': 'mock-provider',
           'gen_ai.request.model': 'mock-model-id',
-          'ai.pipeline.name': 'generateText.doGenerate',
-          'ai.streaming': false,
-          'ai.response.finishReason': 'stop',
-          'ai.response.model': 'mock-model-id',
-          'ai.response.id': expect.any(String),
+          'vercel.ai.pipeline.name': 'generateText.doGenerate',
+          'vercel.ai.streaming': false,
+          'vercel.ai.response.finishReason': 'stop',
+          'vercel.ai.response.model': 'mock-model-id',
+          'vercel.ai.response.id': expect.any(String),
           'gen_ai.response.text': expect.any(String),
-          'ai.response.timestamp': expect.any(String),
-          'ai.prompt.format': expect.any(String),
+          'vercel.ai.response.timestamp': expect.any(String),
+          'vercel.ai.prompt.format': expect.any(String),
           'gen_ai.request.messages': expect.any(String),
           'gen_ai.response.finish_reasons': ['stop'],
           'gen_ai.usage.input_tokens': 10,
@@ -127,14 +127,14 @@ describe('Vercel AI integration', () => {
       // Fifth span - tool call generateText span
       expect.objectContaining({
         data: {
-          'ai.model.id': 'mock-model-id',
-          'ai.model.provider': 'mock-provider',
-          'ai.operationId': 'ai.generateText',
-          'ai.pipeline.name': 'generateText',
-          'ai.response.finishReason': 'tool-calls',
-          'ai.settings.maxRetries': 2,
-          'ai.settings.maxSteps': 1,
-          'ai.streaming': false,
+          'vercel.ai.model.id': 'mock-model-id',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.operationId': 'ai.generateText',
+          'vercel.ai.pipeline.name': 'generateText',
+          'vercel.ai.response.finishReason': 'tool-calls',
+          'vercel.ai.settings.maxRetries': 2,
+          'vercel.ai.settings.maxSteps': 1,
+          'vercel.ai.streaming': false,
           'gen_ai.response.model': 'mock-model-id',
           'gen_ai.usage.input_tokens': 15,
           'gen_ai.usage.output_tokens': 25,
@@ -151,16 +151,16 @@ describe('Vercel AI integration', () => {
       // Sixth span - tool call doGenerate span
       expect.objectContaining({
         data: {
-          'ai.model.id': 'mock-model-id',
-          'ai.model.provider': 'mock-provider',
-          'ai.operationId': 'ai.generateText.doGenerate',
-          'ai.pipeline.name': 'generateText.doGenerate',
-          'ai.response.finishReason': 'tool-calls',
-          'ai.response.id': expect.any(String),
-          'ai.response.model': 'mock-model-id',
-          'ai.response.timestamp': expect.any(String),
-          'ai.settings.maxRetries': 2,
-          'ai.streaming': false,
+          'vercel.ai.model.id': 'mock-model-id',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.operationId': 'ai.generateText.doGenerate',
+          'vercel.ai.pipeline.name': 'generateText.doGenerate',
+          'vercel.ai.response.finishReason': 'tool-calls',
+          'vercel.ai.response.id': expect.any(String),
+          'vercel.ai.response.model': 'mock-model-id',
+          'vercel.ai.response.timestamp': expect.any(String),
+          'vercel.ai.settings.maxRetries': 2,
+          'vercel.ai.streaming': false,
           'gen_ai.request.model': 'mock-model-id',
           'gen_ai.response.finish_reasons': ['tool-calls'],
           'gen_ai.response.id': expect.any(String),
@@ -181,7 +181,7 @@ describe('Vercel AI integration', () => {
       // Seventh span - tool call execution span
       expect.objectContaining({
         data: {
-          'ai.operationId': 'ai.toolCall',
+          'vercel.ai.operationId': 'ai.toolCall',
           'gen_ai.tool.call.id': 'call-1',
           'gen_ai.tool.name': 'getWeather',
           'gen_ai.tool.type': 'function',
@@ -203,16 +203,16 @@ describe('Vercel AI integration', () => {
       // First span - no telemetry config, should enable telemetry AND record inputs/outputs when sendDefaultPii: true
       expect.objectContaining({
         data: {
-          'ai.model.id': 'mock-model-id',
-          'ai.model.provider': 'mock-provider',
-          'ai.operationId': 'ai.generateText',
-          'ai.pipeline.name': 'generateText',
-          'ai.prompt': '{"prompt":"Where is the first span?"}',
-          'ai.response.finishReason': 'stop',
+          'vercel.ai.model.id': 'mock-model-id',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.operationId': 'ai.generateText',
+          'vercel.ai.pipeline.name': 'generateText',
+          'vercel.ai.prompt': '{"prompt":"Where is the first span?"}',
+          'vercel.ai.response.finishReason': 'stop',
           'gen_ai.response.text': 'First span here!',
-          'ai.settings.maxRetries': 2,
-          'ai.settings.maxSteps': 1,
-          'ai.streaming': false,
+          'vercel.ai.settings.maxRetries': 2,
+          'vercel.ai.settings.maxSteps': 1,
+          'vercel.ai.streaming': false,
           'gen_ai.prompt': '{"prompt":"Where is the first span?"}',
           'gen_ai.response.model': 'mock-model-id',
           'gen_ai.usage.input_tokens': 10,
@@ -230,19 +230,19 @@ describe('Vercel AI integration', () => {
       // Second span - doGenerate for first call, should also include input/output fields when sendDefaultPii: true
       expect.objectContaining({
         data: {
-          'ai.model.id': 'mock-model-id',
-          'ai.model.provider': 'mock-provider',
-          'ai.operationId': 'ai.generateText.doGenerate',
-          'ai.pipeline.name': 'generateText.doGenerate',
-          'ai.prompt.format': 'prompt',
+          'vercel.ai.model.id': 'mock-model-id',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.operationId': 'ai.generateText.doGenerate',
+          'vercel.ai.pipeline.name': 'generateText.doGenerate',
+          'vercel.ai.prompt.format': 'prompt',
           'gen_ai.request.messages': '[{"role":"user","content":[{"type":"text","text":"Where is the first span?"}]}]',
-          'ai.response.finishReason': 'stop',
-          'ai.response.id': expect.any(String),
-          'ai.response.model': 'mock-model-id',
+          'vercel.ai.response.finishReason': 'stop',
+          'vercel.ai.response.id': expect.any(String),
+          'vercel.ai.response.model': 'mock-model-id',
           'gen_ai.response.text': 'First span here!',
-          'ai.response.timestamp': expect.any(String),
-          'ai.settings.maxRetries': 2,
-          'ai.streaming': false,
+          'vercel.ai.response.timestamp': expect.any(String),
+          'vercel.ai.settings.maxRetries': 2,
+          'vercel.ai.streaming': false,
           'gen_ai.request.model': 'mock-model-id',
           'gen_ai.response.finish_reasons': ['stop'],
           'gen_ai.response.id': expect.any(String),
@@ -263,16 +263,16 @@ describe('Vercel AI integration', () => {
       // Third span - explicitly enabled telemetry, should record inputs/outputs regardless of sendDefaultPii
       expect.objectContaining({
        data: {
-          'ai.model.id': 'mock-model-id',
-          'ai.model.provider': 'mock-provider',
-          'ai.operationId': 'ai.generateText',
-          'ai.pipeline.name': 'generateText',
-          'ai.prompt': '{"prompt":"Where is the second span?"}',
-          'ai.response.finishReason': 'stop',
+          'vercel.ai.model.id': 'mock-model-id',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.operationId': 'ai.generateText',
+          'vercel.ai.pipeline.name': 'generateText',
+          'vercel.ai.prompt': '{"prompt":"Where is the second span?"}',
+          'vercel.ai.response.finishReason': 'stop',
           'gen_ai.response.text': expect.any(String),
-          'ai.settings.maxRetries': 2,
-          'ai.settings.maxSteps': 1,
-          'ai.streaming': false,
+          'vercel.ai.settings.maxRetries': 2,
+          'vercel.ai.settings.maxSteps': 1,
+          'vercel.ai.streaming': false,
           'gen_ai.prompt': '{"prompt":"Where is the second span?"}',
           'gen_ai.response.model': 'mock-model-id',
           'gen_ai.usage.input_tokens': 10,
@@ -293,20 +293,20 @@ describe('Vercel AI integration', () => {
           'sentry.origin': 'auto.vercelai.otel',
           'sentry.op': 'gen_ai.generate_text',
           'operation.name': 'ai.generateText.doGenerate',
-          'ai.operationId': 'ai.generateText.doGenerate',
-          'ai.model.provider': 'mock-provider',
-          'ai.model.id': 'mock-model-id',
-          'ai.settings.maxRetries': 2,
+          'vercel.ai.operationId': 'ai.generateText.doGenerate',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.model.id': 'mock-model-id',
+          'vercel.ai.settings.maxRetries': 2,
           'gen_ai.system': 'mock-provider',
           'gen_ai.request.model': 'mock-model-id',
-          'ai.pipeline.name': 'generateText.doGenerate',
-          'ai.streaming': false,
-          'ai.response.finishReason': 'stop',
-          'ai.response.model': 'mock-model-id',
-          'ai.response.id': expect.any(String),
+          'vercel.ai.pipeline.name': 'generateText.doGenerate',
+          'vercel.ai.streaming': false,
+          'vercel.ai.response.finishReason': 'stop',
+          'vercel.ai.response.model': 'mock-model-id',
+          'vercel.ai.response.id': expect.any(String),
           'gen_ai.response.text': expect.any(String),
-          'ai.response.timestamp': expect.any(String),
-          'ai.prompt.format': expect.any(String),
+          'vercel.ai.response.timestamp': expect.any(String),
+          'vercel.ai.prompt.format': expect.any(String),
           'gen_ai.request.messages': expect.any(String),
           'gen_ai.response.finish_reasons': ['stop'],
           'gen_ai.usage.input_tokens': 10,
@@ -323,17 +323,17 @@ describe('Vercel AI integration', () => {
       // Fifth span - tool call generateText span (should include prompts when sendDefaultPii: true)
       expect.objectContaining({
         data: {
-          'ai.model.id': 'mock-model-id',
-          'ai.model.provider': 'mock-provider',
-          'ai.operationId': 'ai.generateText',
-          'ai.pipeline.name': 'generateText',
-          'ai.prompt': '{"prompt":"What is the weather in San Francisco?"}',
-          'ai.response.finishReason': 'tool-calls',
+          'vercel.ai.model.id': 'mock-model-id',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.operationId': 'ai.generateText',
+          'vercel.ai.pipeline.name': 'generateText',
+          'vercel.ai.prompt': '{"prompt":"What is the weather in San Francisco?"}',
+          'vercel.ai.response.finishReason': 'tool-calls',
           'gen_ai.response.text': 'Tool call completed!',
           'gen_ai.response.tool_calls': expect.any(String),
-          'ai.settings.maxRetries': 2,
-          'ai.settings.maxSteps': 1,
-          'ai.streaming': false,
+          'vercel.ai.settings.maxRetries': 2,
+          'vercel.ai.settings.maxSteps': 1,
+          'vercel.ai.streaming': false,
           'gen_ai.prompt': '{"prompt":"What is the weather in San Francisco?"}',
           'gen_ai.response.model': 'mock-model-id',
           'gen_ai.usage.input_tokens': 15,
@@ -351,22 +351,22 @@ describe('Vercel AI integration', () => {
       // Sixth span - tool call doGenerate span (should include prompts when sendDefaultPii: true)
       expect.objectContaining({
         data: {
-          'ai.model.id': 'mock-model-id',
-          'ai.model.provider': 'mock-provider',
-          'ai.operationId': 'ai.generateText.doGenerate',
-          'ai.pipeline.name': 'generateText.doGenerate',
-          'ai.prompt.format': expect.any(String),
+          'vercel.ai.model.id': 'mock-model-id',
+          'vercel.ai.model.provider': 'mock-provider',
+          'vercel.ai.operationId': 'ai.generateText.doGenerate',
+          'vercel.ai.pipeline.name': 'generateText.doGenerate',
+          'vercel.ai.prompt.format': expect.any(String),
           'gen_ai.request.messages': expect.any(String),
-          'ai.prompt.toolChoice': expect.any(String),
+          'vercel.ai.prompt.toolChoice': expect.any(String),
           'gen_ai.request.available_tools': expect.any(Array),
-          'ai.response.finishReason': 'tool-calls',
-          'ai.response.id': expect.any(String),
-          'ai.response.model': 'mock-model-id',
+          'vercel.ai.response.finishReason': 'tool-calls',
+          'vercel.ai.response.id': expect.any(String),
+          'vercel.ai.response.model': 'mock-model-id',
           'gen_ai.response.text': 'Tool call completed!',
-          'ai.response.timestamp': expect.any(String),
+          'vercel.ai.response.timestamp': expect.any(String),
           'gen_ai.response.tool_calls': expect.any(String),
-          'ai.settings.maxRetries': 2,
-          'ai.streaming': false,
+          'vercel.ai.settings.maxRetries': 2,
+          'vercel.ai.streaming': false,
           'gen_ai.request.model': 'mock-model-id',
           'gen_ai.response.finish_reasons': ['tool-calls'],
           'gen_ai.response.id': expect.any(String),
@@ -387,7 +387,7 @@ describe('Vercel AI integration', () => {
       // Seventh span - tool call execution span
      expect.objectContaining({
         data: {
-          'ai.operationId': 'ai.toolCall',
+          'vercel.ai.operationId': 'ai.toolCall',
           'gen_ai.tool.call.id': 'call-1',
           'gen_ai.tool.name': 'getWeather',
           'gen_ai.tool.input': expect.any(String),
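For context, here is a minimal sketch of the kind of setup that produces the spans asserted above. It is illustrative only, not the repo's actual scenario file: the file names, DSN placeholder, and mock response values are assumptions chosen to mirror the test fixtures (mock-model-id, the token counts, the finish reasons). It uses Sentry's vercelAIIntegration from @sentry/node together with the AI SDK's MockLanguageModelV1 test helper.

// instrument.mjs -- hypothetical bootstrap, loaded before the app
// (e.g. node --import ./instrument.mjs app.mjs)
import * as Sentry from '@sentry/node';

Sentry.init({
  dsn: '<your-dsn>', // placeholder
  tracesSampleRate: 1.0,
  // When a call has no explicit telemetry config, this flag decides whether
  // prompts/responses are attached as vercel.ai.* / gen_ai.* span attributes.
  sendDefaultPii: false,
  integrations: [Sentry.vercelAIIntegration()],
});

// app.mjs -- drives spans shaped like the first and third spans above
import { generateText } from 'ai';
import { MockLanguageModelV1 } from 'ai/test';

const model = new MockLanguageModelV1({
  doGenerate: async () => ({
    rawCall: { rawPrompt: null, rawSettings: {} },
    finishReason: 'stop',
    usage: { promptTokens: 10, completionTokens: 20 },
    text: 'First span here!',
  }),
});

// No telemetry config: the integration enables telemetry, but inputs/outputs
// are only recorded when sendDefaultPii is true.
await generateText({ model, prompt: 'Where is the first span?' });

// Explicit opt-in: inputs/outputs are recorded regardless of sendDefaultPii,
// so vercel.ai.prompt and gen_ai.response.text appear on the span.
await generateText({
  model,
  prompt: 'Where is the second span?',
  experimental_telemetry: { isEnabled: true },
});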