Skip to content

Commit b09e718

Browse files
committed
fix: address several review comments
1 parent 5f73e4f commit b09e718

File tree

3 files changed

+16
-5
lines changed

3 files changed

+16
-5
lines changed

sentry_sdk/integrations/openai_agents/patches/agent_run.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -163,7 +163,7 @@ async def patched_execute_final_output(
163163
# (For non-streaming, the workflow span is closed by the context manager in _create_run_wrapper)
164164
if agent and hasattr(agent, "_sentry_workflow_span"):
165165
workflow_span = agent._sentry_workflow_span
166-
workflow_span.__exit__(None, None, None)
166+
workflow_span.__exit__(*sys.exc_info())
167167
delattr(agent, "_sentry_workflow_span")
168168

169169
return result

sentry_sdk/integrations/openai_agents/patches/models.py

Lines changed: 14 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -92,6 +92,11 @@ async def wrapped_stream_response(*args: "Any", **kwargs: "Any") -> "Any":
9292
Wrap stream_response to create an AI client span for streaming.
9393
stream_response is an async generator, so we yield events within the span.
9494
95+
Note: We use explicit try/finally instead of a context manager because
96+
if the consumer abandons the stream (breaks early, network error, etc.),
97+
the context manager's __exit__ may not be called. With try/finally,
98+
cleanup happens even when GeneratorExit is thrown.
99+
95100
Note: stream_response is called with positional args unlike get_response
96101
which uses keyword args. The signature is:
97102
stream_response(
@@ -108,17 +113,21 @@ async def wrapped_stream_response(*args: "Any", **kwargs: "Any") -> "Any":
108113
prompt,
109114
)
110115
"""
116+
import sys
117+
111118
# Build kwargs dict from positional args for span data capture
112119
span_kwargs = dict(kwargs)
113120
if len(args) > 0:
114121
span_kwargs["system_instructions"] = args[0]
115122
if len(args) > 1:
116123
span_kwargs["input"] = args[1]
117124

118-
with ai_client_span(agent, span_kwargs) as span:
119-
span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)
125+
span = ai_client_span(agent, span_kwargs)
126+
span.__enter__()
127+
span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)
120128

121-
streaming_response = None
129+
streaming_response = None
130+
try:
122131
async for event in original_stream_response(*args, **kwargs):
123132
# Capture the full response from ResponseCompletedEvent
124133
if hasattr(event, "response"):
@@ -135,6 +144,8 @@ async def wrapped_stream_response(*args: "Any", **kwargs: "Any") -> "Any":
135144
)
136145
_set_response_model_on_agent_span(agent, response_model)
137146
update_ai_client_span(span, streaming_response)
147+
finally:
148+
span.__exit__(*sys.exc_info())
138149

139150
model.stream_response = wrapped_stream_response
140151

sentry_sdk/integrations/openai_agents/spans/ai_client.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -61,7 +61,7 @@ def update_ai_client_span(
6161
_set_usage_data(span, response.usage)
6262

6363
# Set output data and create MCP tool spans if available
64-
if hasattr(response, "output"):
64+
if hasattr(response, "output") and response.output:
6565
_set_output_data(span, response)
6666
_create_mcp_execute_tool_spans(span, response)
6767

0 commit comments

Comments (0)