diff --git a/sentry_sdk/integrations/langchain.py b/sentry_sdk/integrations/langchain.py
index d19d9bbdd5..1d77001684 100644
--- a/sentry_sdk/integrations/langchain.py
+++ b/sentry_sdk/integrations/langchain.py
@@ -351,7 +351,6 @@ def on_llm_start(
         metadata: "Optional[Dict[str, Any]]" = None,
         **kwargs: "Any",
     ) -> "Any":
-        """Run when LLM starts running."""
         with capture_internal_exceptions():
             if not run_id:
                 return
@@ -369,12 +368,18 @@ def on_llm_start(
             watched_span = self._create_span(
                 run_id,
                 parent_run_id,
-                op=OP.GEN_AI_PIPELINE,
-                name=kwargs.get("name") or "Langchain LLM call",
+                op=OP.GEN_AI_GENERATE_TEXT,
+                name=f"generate_text {model}".strip(),
                 origin=LangchainIntegration.origin,
             )
             span = watched_span.span
 
+            span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "generate_text")
+
+            pipeline_name = kwargs.get("name")
+            if pipeline_name:
+                span.set_data(SPANDATA.GEN_AI_PIPELINE_NAME, pipeline_name)
+
             if model:
                 span.set_data(
                     SPANDATA.GEN_AI_REQUEST_MODEL,
diff --git a/tests/integrations/langchain/test_langchain.py b/tests/integrations/langchain/test_langchain.py
index 132da0a9a0..00feb36a50 100644
--- a/tests/integrations/langchain/test_langchain.py
+++ b/tests/integrations/langchain/test_langchain.py
@@ -851,12 +851,15 @@ def test_langchain_integration_with_langchain_core_only(sentry_init, capture_eve
     assert tx["type"] == "transaction"
 
     llm_spans = [
-        span for span in tx.get("spans", []) if span.get("op") == "gen_ai.pipeline"
+        span
+        for span in tx.get("spans", [])
+        if span.get("op") == "gen_ai.generate_text"
     ]
 
     assert len(llm_spans) > 0
     llm_span = llm_spans[0]
-    assert llm_span["description"] == "Langchain LLM call"
+    assert llm_span["description"] == "generate_text gpt-3.5-turbo"
+    assert llm_span["data"]["gen_ai.operation.name"] == "generate_text"
     assert llm_span["data"]["gen_ai.request.model"] == "gpt-3.5-turbo"
     assert (
         llm_span["data"]["gen_ai.response.text"]
@@ -1062,11 +1065,12 @@ def test_langchain_message_truncation(sentry_init, capture_events):
     assert tx["type"] == "transaction"
 
     llm_spans = [
-        span for span in tx.get("spans", []) if span.get("op") == "gen_ai.pipeline"
+        span for span in tx.get("spans", []) if span.get("op") == "gen_ai.generate_text"
     ]
 
     assert len(llm_spans) > 0
     llm_span = llm_spans[0]
 
+    assert llm_span["data"]["gen_ai.operation.name"] == "generate_text"
     assert SPANDATA.GEN_AI_REQUEST_MESSAGES in llm_span["data"]
     messages_data = llm_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES]
@@ -1776,11 +1780,12 @@ def test_langchain_response_model_extraction(
     assert tx["type"] == "transaction"
 
     llm_spans = [
-        span for span in tx.get("spans", []) if span.get("op") == "gen_ai.pipeline"
+        span for span in tx.get("spans", []) if span.get("op") == "gen_ai.generate_text"
     ]
 
     assert len(llm_spans) > 0
     llm_span = llm_spans[0]
 
+    assert llm_span["data"]["gen_ai.operation.name"] == "generate_text"
     if expected_model is not None:
         assert SPANDATA.GEN_AI_RESPONSE_MODEL in llm_span["data"]
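
Note for reviewers, not part of the diff: a minimal sketch of the naming behavior introduced above. The `.strip()` on the span name presumably keeps the description clean when no model name is resolved, and the dict mirrors only the fields the updated tests assert; the concrete values are illustrative.

```python
# Mirrors the f-string used in the diff: when `model` resolves to an empty
# string, .strip() drops the trailing space so the span name degrades to
# plain "generate_text" rather than "generate_text ".
for model, expected in [
    ("gpt-3.5-turbo", "generate_text gpt-3.5-turbo"),
    ("", "generate_text"),
]:
    assert f"generate_text {model}".strip() == expected

# Shape the updated tests assert on the resulting span (keys match the
# SPANDATA constants used in the integration code):
expected_span = {
    "op": "gen_ai.generate_text",                  # was "gen_ai.pipeline"
    "description": "generate_text gpt-3.5-turbo",  # was "Langchain LLM call"
    "data": {
        "gen_ai.operation.name": "generate_text",
        # "gen_ai.pipeline.name" is set only when a `name` kwarg is present
        "gen_ai.request.model": "gpt-3.5-turbo",
    },
}
```

The net effect is that the pipeline name moves from the span's description into the `gen_ai.pipeline.name` data attribute, freeing the op and description to identify the actual generate_text operation.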