Skip to content

Commit 4ab8e74

Browse files
ref(openai): Extract input in API-specific functions
1 parent d94f953 commit 4ab8e74

File tree

1 file changed

+25
-21
lines changed

1 file changed

+25
-21
lines changed

sentry_sdk/integrations/openai.py

Lines changed: 25 additions & 21 deletions
Original file line number | Diff line number | Diff line change
@@ -465,22 +465,16 @@ def _set_embeddings_input_data(
465465
def _common_set_output_data(
466466
span: "Span",
467467
response: "Any",
468-
kwargs: "dict[str, Any]",
468+
input: "Any",
469469
integration: "OpenAIIntegration",
470470
start_time: "Optional[float]" = None,
471471
finish_span: bool = True,
472472
) -> None:
473473
if hasattr(response, "model"):
474474
set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_MODEL, response.model)
475475

476-
# Input messages (the prompt or data sent to the model)
477-
# used for the token usage calculation
478-
messages = kwargs.get("messages")
479-
if messages is None:
480-
messages = kwargs.get("input")
481-
482-
if messages is not None and isinstance(messages, str):
483-
messages = [messages]
476+
if input is not None and isinstance(input, str):
477+
input = [input]
484478

485479
ttft: "Optional[float]" = None
486480

@@ -494,7 +488,7 @@ def _common_set_output_data(
494488
if len(response_text) > 0:
495489
set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, response_text)
496490

497-
_calculate_token_usage(messages, response, span, None, integration.count_tokens)
491+
_calculate_token_usage(input, response, span, None, integration.count_tokens)
498492

499493
if finish_span:
500494
span.__exit__(None, None, None)
@@ -530,7 +524,7 @@ def _common_set_output_data(
530524
span, SPANDATA.GEN_AI_RESPONSE_TEXT, output_messages["response"]
531525
)
532526

533-
_calculate_token_usage(messages, response, span, None, integration.count_tokens)
527+
_calculate_token_usage(input, response, span, None, integration.count_tokens)
534528

535529
if finish_span:
536530
span.__exit__(None, None, None)
@@ -571,7 +565,7 @@ def new_iterator() -> "Iterator[ChatCompletionChunk]":
571565
# OpenAI responses API end of streaming response
572566
if RESPONSES_API_ENABLED and isinstance(x, ResponseCompletedEvent):
573567
_calculate_token_usage(
574-
messages,
568+
input,
575569
x.response,
576570
span,
577571
None,
@@ -594,7 +588,7 @@ def new_iterator() -> "Iterator[ChatCompletionChunk]":
594588
)
595589
if count_tokens_manually:
596590
_calculate_token_usage(
597-
messages,
591+
input,
598592
response,
599593
span,
600594
all_responses,
@@ -635,7 +629,7 @@ async def new_iterator_async() -> "AsyncIterator[ChatCompletionChunk]":
635629
# OpenAI responses API end of streaming response
636630
if RESPONSES_API_ENABLED and isinstance(x, ResponseCompletedEvent):
637631
_calculate_token_usage(
638-
messages,
632+
input,
639633
x.response,
640634
span,
641635
None,
@@ -658,7 +652,7 @@ async def new_iterator_async() -> "AsyncIterator[ChatCompletionChunk]":
658652
)
659653
if count_tokens_manually:
660654
_calculate_token_usage(
661-
messages,
655+
input,
662656
response,
663657
span,
664658
all_responses,
@@ -672,7 +666,7 @@ async def new_iterator_async() -> "AsyncIterator[ChatCompletionChunk]":
672666
else:
673667
response._iterator = new_iterator()
674668
else:
675-
_calculate_token_usage(messages, response, span, None, integration.count_tokens)
669+
_calculate_token_usage(input, response, span, None, integration.count_tokens)
676670
if finish_span:
677671
span.__exit__(None, None, None)
678672

@@ -727,10 +721,12 @@ def _set_completions_api_output_data(
727721
start_time: "Optional[float]" = None,
728722
finish_span: bool = True,
729723
) -> None:
724+
messages = kwargs.get("messages")
725+
730726
_common_set_output_data(
731727
span,
732728
response,
733-
kwargs,
729+
messages,
734730
integration,
735731
start_time,
736732
finish_span,
@@ -745,10 +741,12 @@ def _set_streaming_completions_api_output_data(
745741
start_time: "Optional[float]" = None,
746742
finish_span: bool = True,
747743
) -> None:
744+
messages = kwargs.get("messages")
745+
748746
_common_set_output_data(
749747
span,
750748
response,
751-
kwargs,
749+
messages,
752750
integration,
753751
start_time,
754752
finish_span,
@@ -763,10 +761,12 @@ def _set_responses_api_output_data(
763761
start_time: "Optional[float]" = None,
764762
finish_span: bool = True,
765763
) -> None:
764+
input = kwargs.get("input")
765+
766766
_common_set_output_data(
767767
span,
768768
response,
769-
kwargs,
769+
input,
770770
integration,
771771
start_time,
772772
finish_span,
@@ -781,10 +781,12 @@ def _set_streaming_responses_api_output_data(
781781
start_time: "Optional[float]" = None,
782782
finish_span: bool = True,
783783
) -> None:
784+
input = kwargs.get("input")
785+
784786
_common_set_output_data(
785787
span,
786788
response,
787-
kwargs,
789+
input,
788790
integration,
789791
start_time,
790792
finish_span,
@@ -799,10 +801,12 @@ def _set_embeddings_output_data(
799801
start_time: "Optional[float]" = None,
800802
finish_span: bool = True,
801803
) -> None:
804+
input = kwargs.get("input")
805+
802806
_common_set_output_data(
803807
span,
804808
response,
805-
kwargs,
809+
input,
806810
integration,
807811
start_time,
808812
finish_span,

0 commit comments

Comments (0)