@@ -81,14 +81,17 @@ def test_nonstreaming_chat_completion(
81
81
tx = events [0 ]
82
82
assert tx ["type" ] == "transaction"
83
83
span = tx ["spans" ][0 ]
84
- assert span["op"] == "ai.chat_completions.create.openai"
84
+ assert span["op"] == "gen_ai.chat"
85
85
86
86
if send_default_pii and include_prompts :
87
- assert "hello" in span ["data" ][SPANDATA .AI_INPUT_MESSAGES ]["content" ]
88
- assert "the model response" in span ["data" ][SPANDATA .AI_RESPONSES ]["content" ]
87
+ assert "hello" in span ["data" ][SPANDATA .GEN_AI_REQUEST_MESSAGES ]["content" ]
88
+ assert (
89
+ "the model response"
90
+ in span ["data" ][SPANDATA .GEN_AI_RESPONSE_TEXT ]["content" ]
91
+ )
89
92
else :
90
- assert SPANDATA .AI_INPUT_MESSAGES not in span ["data" ]
91
- assert SPANDATA .AI_RESPONSES not in span ["data" ]
93
+ assert SPANDATA .GEN_AI_REQUEST_MESSAGES not in span ["data" ]
94
+ assert SPANDATA .GEN_AI_RESPONSE_TEXT not in span ["data" ]
92
95
93
96
assert span ["data" ]["gen_ai.usage.output_tokens" ] == 10
94
97
assert span ["data" ]["gen_ai.usage.input_tokens" ] == 20
@@ -123,14 +126,17 @@ async def test_nonstreaming_chat_completion_async(
123
126
tx = events [0 ]
124
127
assert tx ["type" ] == "transaction"
125
128
span = tx ["spans" ][0 ]
126
- assert span["op"] == "ai.chat_completions.create.openai"
129
+ assert span["op"] == "gen_ai.chat"
127
130
128
131
if send_default_pii and include_prompts :
129
- assert "hello" in span ["data" ][SPANDATA .AI_INPUT_MESSAGES ]["content" ]
130
- assert "the model response" in span ["data" ][SPANDATA .AI_RESPONSES ]["content" ]
132
+ assert "hello" in span ["data" ][SPANDATA .GEN_AI_REQUEST_MESSAGES ]["content" ]
133
+ assert (
134
+ "the model response"
135
+ in span ["data" ][SPANDATA .GEN_AI_RESPONSE_TEXT ]["content" ]
136
+ )
131
137
else :
132
- assert SPANDATA .AI_INPUT_MESSAGES not in span ["data" ]
133
- assert SPANDATA .AI_RESPONSES not in span ["data" ]
138
+ assert SPANDATA .GEN_AI_REQUEST_MESSAGES not in span ["data" ]
139
+ assert SPANDATA .GEN_AI_RESPONSE_TEXT not in span ["data" ]
134
140
135
141
assert span ["data" ]["gen_ai.usage.output_tokens" ] == 10
136
142
assert span ["data" ]["gen_ai.usage.input_tokens" ] == 20
@@ -216,14 +222,14 @@ def test_streaming_chat_completion(
216
222
tx = events [0 ]
217
223
assert tx ["type" ] == "transaction"
218
224
span = tx ["spans" ][0 ]
219
- assert span["op"] == "ai.chat_completions.create.openai"
225
+ assert span["op"] == "gen_ai.chat"
220
226
221
227
if send_default_pii and include_prompts :
222
- assert "hello" in span ["data" ][SPANDATA .AI_INPUT_MESSAGES ]["content" ]
223
- assert "hello world" in span ["data" ][SPANDATA .AI_RESPONSES ]
228
+ assert "hello" in span ["data" ][SPANDATA .GEN_AI_REQUEST_MESSAGES ]["content" ]
229
+ assert "hello world" in span ["data" ][SPANDATA .GEN_AI_RESPONSE_TEXT ]
224
230
else :
225
- assert SPANDATA .AI_INPUT_MESSAGES not in span ["data" ]
226
- assert SPANDATA .AI_RESPONSES not in span ["data" ]
231
+ assert SPANDATA .GEN_AI_REQUEST_MESSAGES not in span ["data" ]
232
+ assert SPANDATA .GEN_AI_RESPONSE_TEXT not in span ["data" ]
227
233
228
234
try :
229
235
import tiktoken # type: ignore # noqa # pylint: disable=unused-import
@@ -312,14 +318,14 @@ async def test_streaming_chat_completion_async(
312
318
tx = events [0 ]
313
319
assert tx ["type" ] == "transaction"
314
320
span = tx ["spans" ][0 ]
315
- assert span["op"] == "ai.chat_completions.create.openai"
321
+ assert span["op"] == "gen_ai.chat"
316
322
317
323
if send_default_pii and include_prompts :
318
- assert "hello" in span ["data" ][SPANDATA .AI_INPUT_MESSAGES ]["content" ]
319
- assert "hello world" in span ["data" ][SPANDATA .AI_RESPONSES ]
324
+ assert "hello" in span ["data" ][SPANDATA .GEN_AI_REQUEST_MESSAGES ]["content" ]
325
+ assert "hello world" in span ["data" ][SPANDATA .GEN_AI_RESPONSE_TEXT ]
320
326
else :
321
- assert SPANDATA .AI_INPUT_MESSAGES not in span ["data" ]
322
- assert SPANDATA .AI_RESPONSES not in span ["data" ]
327
+ assert SPANDATA .GEN_AI_REQUEST_MESSAGES not in span ["data" ]
328
+ assert SPANDATA .GEN_AI_RESPONSE_TEXT not in span ["data" ]
323
329
324
330
try :
325
331
import tiktoken # type: ignore # noqa # pylint: disable=unused-import
@@ -403,11 +409,11 @@ def test_embeddings_create(
403
409
tx = events [0 ]
404
410
assert tx ["type" ] == "transaction"
405
411
span = tx ["spans" ][0 ]
406
- assert span["op"] == "ai.embeddings.create.openai"
412
+ assert span["op"] == "gen_ai.embeddings"
407
413
if send_default_pii and include_prompts :
408
- assert "hello" in span ["data" ][SPANDATA .AI_INPUT_MESSAGES ]
414
+ assert "hello" in span ["data" ][SPANDATA .GEN_AI_REQUEST_MESSAGES ]
409
415
else :
410
- assert SPANDATA .AI_INPUT_MESSAGES not in span ["data" ]
416
+ assert SPANDATA .GEN_AI_REQUEST_MESSAGES not in span ["data" ]
411
417
412
418
assert span ["data" ]["gen_ai.usage.input_tokens" ] == 20
413
419
assert span ["data" ]["gen_ai.usage.total_tokens" ] == 30
@@ -451,11 +457,11 @@ async def test_embeddings_create_async(
451
457
tx = events [0 ]
452
458
assert tx ["type" ] == "transaction"
453
459
span = tx ["spans" ][0 ]
454
- assert span["op"] == "ai.embeddings.create.openai"
460
+ assert span["op"] == "gen_ai.embeddings"
455
461
if send_default_pii and include_prompts :
456
- assert "hello" in span ["data" ][SPANDATA .AI_INPUT_MESSAGES ]
462
+ assert "hello" in span ["data" ][SPANDATA .GEN_AI_REQUEST_MESSAGES ]
457
463
else :
458
- assert SPANDATA .AI_INPUT_MESSAGES not in span ["data" ]
464
+ assert SPANDATA .GEN_AI_REQUEST_MESSAGES not in span ["data" ]
459
465
460
466
assert span ["data" ]["gen_ai.usage.input_tokens" ] == 20
461
467
assert span ["data" ]["gen_ai.usage.total_tokens" ] == 30