Skip to content

Commit ece696a

Browse files
fix(openai): Attach response model with streamed Responses API
1 parent 3fe3499 commit ece696a

File tree

2 files changed

+5
-0
lines changed

2 files changed

+5
-0
lines changed

sentry_sdk/integrations/openai.py

Lines changed: 4 additions & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -760,6 +760,8 @@ def new_iterator() -> "Iterator[ChatCompletionChunk]":
760760
data_buf[0].append(x.delta or "")
761761

762762
if isinstance(x, ResponseCompletedEvent):
763+
span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, x.response.model)
764+
763765
_calculate_token_usage(
764766
input,
765767
x.response,
@@ -807,6 +809,8 @@ async def new_iterator_async() -> "AsyncIterator[ChatCompletionChunk]":
807809
data_buf[0].append(x.delta or "")
808810

809811
if isinstance(x, ResponseCompletedEvent):
812+
span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, x.response.model)
813+
810814
_calculate_token_usage(
811815
input,
812816
x.response,

tests/integrations/openai/test_openai.py

Lines changed: 1 addition & 0 deletions
Original file line number · Diff line number · Diff line change
@@ -2460,6 +2460,7 @@ async def test_ai_client_span_streaming_responses_async_api(
24602460

24612461
expected_data = {
24622462
"gen_ai.operation.name": "responses",
2463+
"gen_ai.response.model": "response-model-id",
24632464
"gen_ai.response.streaming": True,
24642465
"gen_ai.system": "openai",
24652466
"gen_ai.response.time_to_first_token": mock.ANY,

0 commit comments

Comments (0)