Skip to content

Commit dca9f30

Browse files
authored
Update openai spans to be OTel compatible (#4563)
Change the span names, ops, and attributes to the new OTel-compatible names. We checked and none of our customers use those legacy span names, span ops, or attribute names in alerts, dashboards, or saved queries, so this is safe to change in a minor.
1 parent 34dcba4 commit dca9f30

File tree

3 files changed

+57
-40
lines changed

3 files changed

+57
-40
lines changed

sentry_sdk/consts.py

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -372,6 +372,12 @@ class SPANDATA:
372372
Example: "chat"
373373
"""
374374

375+
GEN_AI_RESPONSE_MODEL = "gen_ai.response.model"
376+
"""
377+
Exact model identifier used to generate the response
378+
Example: gpt-4o-mini-2024-07-18
379+
"""
380+
375381
GEN_AI_RESPONSE_TEXT = "gen_ai.response.text"
376382
"""
377383
The model's response text messages.
@@ -649,6 +655,7 @@ class OP:
649655
FUNCTION_AWS = "function.aws"
650656
FUNCTION_GCP = "function.gcp"
651657
GEN_AI_CHAT = "gen_ai.chat"
658+
GEN_AI_EMBEDDINGS = "gen_ai.embeddings"
652659
GEN_AI_EXECUTE_TOOL = "gen_ai.execute_tool"
653660
GEN_AI_HANDOFF = "gen_ai.handoff"
654661
GEN_AI_INVOKE_AGENT = "gen_ai.invoke_agent"
@@ -674,8 +681,6 @@ class OP:
674681
MIDDLEWARE_STARLITE = "middleware.starlite"
675682
MIDDLEWARE_STARLITE_RECEIVE = "middleware.starlite.receive"
676683
MIDDLEWARE_STARLITE_SEND = "middleware.starlite.send"
677-
OPENAI_CHAT_COMPLETIONS_CREATE = "ai.chat_completions.create.openai"
678-
OPENAI_EMBEDDINGS_CREATE = "ai.embeddings.create.openai"
679684
HUGGINGFACE_HUB_CHAT_COMPLETIONS_CREATE = (
680685
"ai.chat_completions.create.huggingface_hub"
681686
)

sentry_sdk/integrations/openai.py

Lines changed: 18 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -161,8 +161,8 @@ def _new_chat_completion_common(f, *args, **kwargs):
161161
streaming = kwargs.get("stream")
162162

163163
span = sentry_sdk.start_span(
164-
op=consts.OP.OPENAI_CHAT_COMPLETIONS_CREATE,
165-
name="Chat Completion",
164+
op=consts.OP.GEN_AI_CHAT,
165+
name=f"{consts.OP.GEN_AI_CHAT} {model}",
166166
origin=OpenAIIntegration.origin,
167167
)
168168
span.__enter__()
@@ -171,16 +171,16 @@ def _new_chat_completion_common(f, *args, **kwargs):
171171

172172
with capture_internal_exceptions():
173173
if should_send_default_pii() and integration.include_prompts:
174-
set_data_normalized(span, SPANDATA.AI_INPUT_MESSAGES, messages)
174+
set_data_normalized(span, SPANDATA.GEN_AI_REQUEST_MESSAGES, messages)
175175

176-
set_data_normalized(span, SPANDATA.AI_MODEL_ID, model)
176+
set_data_normalized(span, SPANDATA.GEN_AI_REQUEST_MODEL, model)
177177
set_data_normalized(span, SPANDATA.AI_STREAMING, streaming)
178178

179179
if hasattr(res, "choices"):
180180
if should_send_default_pii() and integration.include_prompts:
181181
set_data_normalized(
182182
span,
183-
SPANDATA.AI_RESPONSES,
183+
SPANDATA.GEN_AI_RESPONSE_TEXT,
184184
list(map(lambda x: x.message, res.choices)),
185185
)
186186
_calculate_token_usage(messages, res, span, None, integration.count_tokens)
@@ -212,7 +212,7 @@ def new_iterator():
212212
)
213213
if should_send_default_pii() and integration.include_prompts:
214214
set_data_normalized(
215-
span, SPANDATA.AI_RESPONSES, all_responses
215+
span, SPANDATA.GEN_AI_RESPONSE_TEXT, all_responses
216216
)
217217
_calculate_token_usage(
218218
messages,
@@ -245,7 +245,7 @@ async def new_iterator_async():
245245
)
246246
if should_send_default_pii() and integration.include_prompts:
247247
set_data_normalized(
248-
span, SPANDATA.AI_RESPONSES, all_responses
248+
span, SPANDATA.GEN_AI_RESPONSE_TEXT, all_responses
249249
)
250250
_calculate_token_usage(
251251
messages,
@@ -343,24 +343,30 @@ def _new_embeddings_create_common(f, *args, **kwargs):
343343
if integration is None:
344344
return f(*args, **kwargs)
345345

346+
model = kwargs.get("model")
347+
346348
with sentry_sdk.start_span(
347-
op=consts.OP.OPENAI_EMBEDDINGS_CREATE,
348-
description="OpenAI Embedding Creation",
349+
op=consts.OP.GEN_AI_EMBEDDINGS,
350+
name=f"{consts.OP.GEN_AI_EMBEDDINGS} {model}",
349351
origin=OpenAIIntegration.origin,
350352
) as span:
351353
if "input" in kwargs and (
352354
should_send_default_pii() and integration.include_prompts
353355
):
354356
if isinstance(kwargs["input"], str):
355-
set_data_normalized(span, SPANDATA.AI_INPUT_MESSAGES, [kwargs["input"]])
357+
set_data_normalized(
358+
span, SPANDATA.GEN_AI_REQUEST_MESSAGES, [kwargs["input"]]
359+
)
356360
elif (
357361
isinstance(kwargs["input"], list)
358362
and len(kwargs["input"]) > 0
359363
and isinstance(kwargs["input"][0], str)
360364
):
361-
set_data_normalized(span, SPANDATA.AI_INPUT_MESSAGES, kwargs["input"])
365+
set_data_normalized(
366+
span, SPANDATA.GEN_AI_REQUEST_MESSAGES, kwargs["input"]
367+
)
362368
if "model" in kwargs:
363-
set_data_normalized(span, SPANDATA.AI_MODEL_ID, kwargs["model"])
369+
set_data_normalized(span, SPANDATA.GEN_AI_REQUEST_MODEL, kwargs["model"])
364370

365371
response = yield f, args, kwargs
366372

tests/integrations/openai/test_openai.py

Lines changed: 32 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -81,14 +81,17 @@ def test_nonstreaming_chat_completion(
8181
tx = events[0]
8282
assert tx["type"] == "transaction"
8383
span = tx["spans"][0]
84-
assert span["op"] == "ai.chat_completions.create.openai"
84+
assert span["op"] == "gen_ai.chat"
8585

8686
if send_default_pii and include_prompts:
87-
assert "hello" in span["data"][SPANDATA.AI_INPUT_MESSAGES]["content"]
88-
assert "the model response" in span["data"][SPANDATA.AI_RESPONSES]["content"]
87+
assert "hello" in span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]["content"]
88+
assert (
89+
"the model response"
90+
in span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT]["content"]
91+
)
8992
else:
90-
assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
91-
assert SPANDATA.AI_RESPONSES not in span["data"]
93+
assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"]
94+
assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"]
9295

9396
assert span["data"]["gen_ai.usage.output_tokens"] == 10
9497
assert span["data"]["gen_ai.usage.input_tokens"] == 20
@@ -123,14 +126,17 @@ async def test_nonstreaming_chat_completion_async(
123126
tx = events[0]
124127
assert tx["type"] == "transaction"
125128
span = tx["spans"][0]
126-
assert span["op"] == "ai.chat_completions.create.openai"
129+
assert span["op"] == "gen_ai.chat"
127130

128131
if send_default_pii and include_prompts:
129-
assert "hello" in span["data"][SPANDATA.AI_INPUT_MESSAGES]["content"]
130-
assert "the model response" in span["data"][SPANDATA.AI_RESPONSES]["content"]
132+
assert "hello" in span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]["content"]
133+
assert (
134+
"the model response"
135+
in span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT]["content"]
136+
)
131137
else:
132-
assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
133-
assert SPANDATA.AI_RESPONSES not in span["data"]
138+
assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"]
139+
assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"]
134140

135141
assert span["data"]["gen_ai.usage.output_tokens"] == 10
136142
assert span["data"]["gen_ai.usage.input_tokens"] == 20
@@ -216,14 +222,14 @@ def test_streaming_chat_completion(
216222
tx = events[0]
217223
assert tx["type"] == "transaction"
218224
span = tx["spans"][0]
219-
assert span["op"] == "ai.chat_completions.create.openai"
225+
assert span["op"] == "gen_ai.chat"
220226

221227
if send_default_pii and include_prompts:
222-
assert "hello" in span["data"][SPANDATA.AI_INPUT_MESSAGES]["content"]
223-
assert "hello world" in span["data"][SPANDATA.AI_RESPONSES]
228+
assert "hello" in span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]["content"]
229+
assert "hello world" in span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT]
224230
else:
225-
assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
226-
assert SPANDATA.AI_RESPONSES not in span["data"]
231+
assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"]
232+
assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"]
227233

228234
try:
229235
import tiktoken # type: ignore # noqa # pylint: disable=unused-import
@@ -312,14 +318,14 @@ async def test_streaming_chat_completion_async(
312318
tx = events[0]
313319
assert tx["type"] == "transaction"
314320
span = tx["spans"][0]
315-
assert span["op"] == "ai.chat_completions.create.openai"
321+
assert span["op"] == "gen_ai.chat"
316322

317323
if send_default_pii and include_prompts:
318-
assert "hello" in span["data"][SPANDATA.AI_INPUT_MESSAGES]["content"]
319-
assert "hello world" in span["data"][SPANDATA.AI_RESPONSES]
324+
assert "hello" in span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]["content"]
325+
assert "hello world" in span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT]
320326
else:
321-
assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
322-
assert SPANDATA.AI_RESPONSES not in span["data"]
327+
assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"]
328+
assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"]
323329

324330
try:
325331
import tiktoken # type: ignore # noqa # pylint: disable=unused-import
@@ -403,11 +409,11 @@ def test_embeddings_create(
403409
tx = events[0]
404410
assert tx["type"] == "transaction"
405411
span = tx["spans"][0]
406-
assert span["op"] == "ai.embeddings.create.openai"
412+
assert span["op"] == "gen_ai.embeddings"
407413
if send_default_pii and include_prompts:
408-
assert "hello" in span["data"][SPANDATA.AI_INPUT_MESSAGES]
414+
assert "hello" in span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]
409415
else:
410-
assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
416+
assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"]
411417

412418
assert span["data"]["gen_ai.usage.input_tokens"] == 20
413419
assert span["data"]["gen_ai.usage.total_tokens"] == 30
@@ -451,11 +457,11 @@ async def test_embeddings_create_async(
451457
tx = events[0]
452458
assert tx["type"] == "transaction"
453459
span = tx["spans"][0]
454-
assert span["op"] == "ai.embeddings.create.openai"
460+
assert span["op"] == "gen_ai.embeddings"
455461
if send_default_pii and include_prompts:
456-
assert "hello" in span["data"][SPANDATA.AI_INPUT_MESSAGES]
462+
assert "hello" in span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]
457463
else:
458-
assert SPANDATA.AI_INPUT_MESSAGES not in span["data"]
464+
assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"]
459465

460466
assert span["data"]["gen_ai.usage.input_tokens"] == 20
461467
assert span["data"]["gen_ai.usage.total_tokens"] == 30

0 commit comments

Comments
 (0)