@@ -62,7 +62,7 @@ def _capture_exception(exc):
6262def _wrap_huggingface_task (f , op ):
6363 # type: (Callable[..., Any], str) -> Callable[..., Any]
6464 @wraps (f )
65- def new_text_generation (* args , ** kwargs ):
65+ def new_huggingface_task (* args , ** kwargs ):
6666 # type: (*Any, **Any) -> Any
6767 integration = sentry_sdk .get_client ().get_integration (HuggingfaceHubIntegration )
6868 if integration is None :
@@ -82,7 +82,6 @@ def new_text_generation(*args, **kwargs):
8282
8383 client = args [0 ]
8484 model = client .model or kwargs .get ("model" ) or ""
85- streaming = kwargs .get ("stream" )
8685 operation_name = op .split ("." )[- 1 ]
8786
8887 span = sentry_sdk .start_span (
@@ -93,9 +92,29 @@ def new_text_generation(*args, **kwargs):
9392 span .__enter__ ()
9493
9594 span .set_data (SPANDATA .GEN_AI_OPERATION_NAME , operation_name )
95+
9696 if model :
9797 span .set_data (SPANDATA .GEN_AI_REQUEST_MODEL , model )
9898
99+ # Input attributes
100+ attribute_mapping = {
101+ "tools" : SPANDATA .GEN_AI_REQUEST_AVAILABLE_TOOLS ,
102+ "frequency_penalty" : SPANDATA .GEN_AI_REQUEST_FREQUENCY_PENALTY ,
103+ "max_tokens" : SPANDATA .GEN_AI_REQUEST_MAX_TOKENS ,
104+ "presence_penalty" : SPANDATA .GEN_AI_REQUEST_PRESENCE_PENALTY ,
105+ "temperature" : SPANDATA .GEN_AI_REQUEST_TEMPERATURE ,
106+ "top_p" : SPANDATA .GEN_AI_REQUEST_TOP_P ,
107+ "top_k" : SPANDATA .GEN_AI_REQUEST_TOP_K ,
108+ "stream" : SPANDATA .GEN_AI_RESPONSE_STREAMING ,
109+ }
110+ for attribute , span_attribute in attribute_mapping .items ():
111+ value = kwargs .get (attribute , None )
112+ if value is not None :
113+ if isinstance (value , (int , float , bool , str )):
114+ span .set_data (span_attribute , value )
115+ else :
116+ set_data_normalized (span , span_attribute , value , unpack = False )
117+
99118 try :
100119 res = f (* args , ** kwargs )
101120 except Exception as e :
@@ -105,30 +124,56 @@ def new_text_generation(*args, **kwargs):
105124 raise e from None
106125
107126 with capture_internal_exceptions ():
127+ # Output attributes
128+ if hasattr (res , "model" ):
129+ model = res .model
130+ if model :
131+ span .set_data (SPANDATA .GEN_AI_RESPONSE_MODEL , model )
132+
133+ if hasattr (res , "details" ) and res .details is not None :
134+ finish_reason = getattr (res .details , "finish_reason" , None )
135+ if finish_reason :
136+ span .set_data (
137+ SPANDATA .GEN_AI_RESPONSE_FINISH_REASONS , finish_reason
138+ )
139+
140+ try :
141+ tool_calls = res .choices [0 ].message .tool_calls
142+ except Exception :
143+ tool_calls = []
144+
145+ if len (tool_calls ) > 0 :
146+ set_data_normalized (
147+ span ,
148+ SPANDATA .GEN_AI_RESPONSE_TOOL_CALLS ,
149+ tool_calls ,
150+ unpack = False ,
151+ )
152+
108153 if should_send_default_pii () and integration .include_prompts :
109154 set_data_normalized (
110155 span , SPANDATA .GEN_AI_REQUEST_MESSAGES , prompt , unpack = False
111156 )
112157
113- span .set_data (SPANDATA .GEN_AI_RESPONSE_STREAMING , streaming )
114-
115158 if isinstance (res , str ):
116159 if should_send_default_pii () and integration .include_prompts :
117- set_data_normalized (
118- span ,
119- SPANDATA .GEN_AI_RESPONSE_TEXT ,
120- res ,
121- )
160+ if res :
161+ set_data_normalized (
162+ span ,
163+ SPANDATA .GEN_AI_RESPONSE_TEXT ,
164+ res ,
165+ )
122166 span .__exit__ (None , None , None )
123167 return res
124168
125169 if isinstance (res , TextGenerationOutput ):
126170 if should_send_default_pii () and integration .include_prompts :
127- set_data_normalized (
128- span ,
129- SPANDATA .GEN_AI_RESPONSE_TEXT ,
130- res .generated_text ,
131- )
171+ if res .generated_text :
172+ set_data_normalized (
173+ span ,
174+ SPANDATA .GEN_AI_RESPONSE_TEXT ,
175+ res .generated_text ,
176+ )
132177 if res .details is not None and res .details .generated_tokens > 0 :
133178 record_token_usage (
134179 span ,
@@ -140,13 +185,17 @@ def new_text_generation(*args, **kwargs):
140185 if isinstance (res , ChatCompletionOutput ):
141186 if should_send_default_pii () and integration .include_prompts :
142187 text_response = "" .join (
143- [x .get ("message" , {}).get ("content" ) for x in res .choices ]
144- )
145- set_data_normalized (
146- span ,
147- SPANDATA .GEN_AI_RESPONSE_TEXT ,
148- text_response ,
188+ [
189+ x .get ("message" , {}).get ("content" , None ) or ""
190+ for x in res .choices
191+ ]
149192 )
193+ if text_response :
194+ set_data_normalized (
195+ span ,
196+ SPANDATA .GEN_AI_RESPONSE_TEXT ,
197+ text_response ,
198+ )
150199 if hasattr (res , "usage" ) and res .usage is not None :
151200 record_token_usage (
152201 span ,
@@ -182,11 +231,13 @@ def new_details_iterator():
182231 and should_send_default_pii ()
183232 and integration .include_prompts
184233 ):
185- set_data_normalized (
186- span ,
187- SPANDATA .GEN_AI_RESPONSE_TEXT ,
188- "" .join (data_buf ),
189- )
234+ text_response = "" .join (data_buf )
235+ if text_response :
236+ set_data_normalized (
237+ span ,
238+ SPANDATA .GEN_AI_RESPONSE_TEXT ,
239+ text_response ,
240+ )
190241 if tokens_used > 0 :
191242 record_token_usage (
192243 span ,
@@ -211,13 +262,15 @@ def new_iterator():
211262 and should_send_default_pii ()
212263 and integration .include_prompts
213264 ):
214- set_data_normalized (
215- span ,
216- SPANDATA .GEN_AI_RESPONSE_TEXT ,
217- "" .join (data_buf ),
218- )
265+ text_response = "" .join (data_buf )
266+ if text_response :
267+ set_data_normalized (
268+ span ,
269+ SPANDATA .GEN_AI_RESPONSE_TEXT ,
270+ text_response ,
271+ )
219272 span .__exit__ (None , None , None )
220273
221274 return new_iterator ()
222275
223- return new_text_generation
276+ return new_huggingface_task