125 changes: 46 additions & 79 deletions sentry_sdk/integrations/langchain.py
@@ -23,6 +23,13 @@
from sentry_sdk.tracing_utils import _get_value, set_span_errored
from sentry_sdk.utils import capture_internal_exceptions, logger

CURRENT_LANGCHAIN_AGENT_NAME = contextvars.ContextVar("CURRENT_LANGCHAIN_AGENT_NAME", default=None)


def _get_current_langchain_agent_name() -> "Optional[str]":
return CURRENT_LANGCHAIN_AGENT_NAME.get(None)


if TYPE_CHECKING:
from typing import (
Any,
@@ -154,43 +161,6 @@ def _transform_langchain_message_content(content: "Any") -> "Any":


# Contextvar to track agent names in a stack for re-entrant agent support
_agent_stack: "contextvars.ContextVar[Optional[List[Optional[str]]]]" = (
contextvars.ContextVar("langchain_agent_stack", default=None)
)


def _push_agent(agent_name: "Optional[str]") -> None:
"""Push an agent name onto the stack."""
[Review comment] Stale comment now misleadingly annotates unrelated function
Low Severity

The comment `# Contextvar to track agent names in a stack for re-entrant agent support` originally documented the `_agent_stack` ContextVar that is removed in this diff. The comment was left behind and now sits directly above `_get_system_instructions`, making it look like it describes that unrelated function. This is misleading for anyone reading the code.
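A minimal suggested edit (sketch only; the signature below is copied from this diff) is to delete the dangling comment so nothing mislabels the function:

# Before: the stale stack comment sits above an unrelated function.
# Contextvar to track agent names in a stack for re-entrant agent support
def _get_system_instructions(messages: "List[List[BaseMessage]]") -> "List[str]":
    ...

# After: drop the comment; the new CURRENT_LANGCHAIN_AGENT_NAME ContextVar
# holds a single value, not a stack, so the note no longer applies anywhere.
def _get_system_instructions(messages: "List[List[BaseMessage]]") -> "List[str]":
    ...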

stack = _agent_stack.get()
if stack is None:
stack = []
else:
# Copy the list to maintain contextvar isolation across async contexts
stack = stack.copy()
stack.append(agent_name)
_agent_stack.set(stack)


def _pop_agent() -> "Optional[str]":
"""Pop an agent name from the stack and return it."""
stack = _agent_stack.get()
if stack:
# Copy the list to maintain contextvar isolation across async contexts
stack = stack.copy()
agent_name = stack.pop()
_agent_stack.set(stack)
return agent_name
return None


def _get_current_agent() -> "Optional[str]":
"""Get the current agent name (top of stack) without removing it."""
stack = _agent_stack.get()
if stack:
return stack[-1]
return None


def _get_system_instructions(messages: "List[List[BaseMessage]]") -> "List[str]":
system_instructions = []

Expand Down Expand Up @@ -327,6 +297,11 @@ def _create_span(
watched_span = WatchedSpan(sentry_sdk.start_span(**kwargs))

watched_span.span.__enter__()

agent_name = _get_current_langchain_agent_name()
if agent_name:
watched_span.span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent_name)

self.span_map[run_id] = watched_span
self.gc_span_map()
return watched_span
@@ -455,10 +430,6 @@ def on_chat_model_start(
elif "openai" in ai_type:
span.set_data(SPANDATA.GEN_AI_SYSTEM, "openai")

agent_name = _get_current_agent()
if agent_name:
span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent_name)

for key, attribute in DATA_FIELDS.items():
if key in all_params and all_params[key] is not None:
set_data_normalized(span, attribute, all_params[key], unpack=False)
@@ -655,10 +626,6 @@ def on_tool_start(
if tool_description is not None:
span.set_data(SPANDATA.GEN_AI_TOOL_DESCRIPTION, tool_description)

agent_name = _get_current_agent()
if agent_name:
span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent_name)

if should_send_default_pii() and self.include_prompts:
set_data_normalized(
span,
@@ -978,17 +945,15 @@ def new_invoke(self: "Any", *args: "Any", **kwargs: "Any") -> "Any":
return f(self, *args, **kwargs)

agent_name, tools = _get_request_data(self, args, kwargs)
token = CURRENT_LANGCHAIN_AGENT_NAME.set(agent_name)
start_span_function = get_start_span_function()

with start_span_function(
try:
op=OP.GEN_AI_INVOKE_AGENT,
name=f"invoke_agent {agent_name}" if agent_name else "invoke_agent",
origin=LangchainIntegration.origin,
) as span:
_push_agent(agent_name)
try:
if agent_name:
span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent_name)

span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, False)
Expand Down Expand Up @@ -1028,7 +993,8 @@ def new_invoke(self: "Any", *args: "Any", **kwargs: "Any") -> "Any":
return result
finally:
# Ensure agent is popped even if an exception occurs
_pop_agent()
finally:
CURRENT_LANGCHAIN_AGENT_NAME.reset(token)

return new_invoke
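For context, a self-contained sketch of the set()/reset() token pattern that new_invoke adopts above (illustrative names, not SDK code): set() returns a Token recording the previous value, and reset(token) restores it in the finally block, which is what keeps nested, re-entrant agent invocations unwinding correctly:

import contextvars
from typing import Callable, Optional

CURRENT_AGENT: "contextvars.ContextVar[Optional[str]]" = contextvars.ContextVar(
    "CURRENT_AGENT", default=None
)

def invoke(agent_name: "Optional[str]", body: "Callable[[], None]") -> None:
    token = CURRENT_AGENT.set(agent_name)
    try:
        body()  # anything running here sees CURRENT_AGENT == agent_name
    finally:
        CURRENT_AGENT.reset(token)  # restores whatever was set before

def inner() -> None:
    assert CURRENT_AGENT.get() == "inner"

def outer() -> None:
    assert CURRENT_AGENT.get() == "outer"
    invoke("inner", inner)
    assert CURRENT_AGENT.get() == "outer"  # outer name restored after nesting

invoke("outer", outer)
assert CURRENT_AGENT.get() is None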

@@ -1041,6 +1007,7 @@ def new_stream(self: "Any", *args: "Any", **kwargs: "Any") -> "Any":
return f(self, *args, **kwargs)

agent_name, tools = _get_request_data(self, args, kwargs)
token = CURRENT_LANGCHAIN_AGENT_NAME.set(agent_name)
start_span_function = get_start_span_function()

span = start_span_function(
@@ -1050,37 +1017,37 @@ def new_stream(self: "Any", *args: "Any", **kwargs: "Any") -> "Any":
)
span.__enter__()

_push_agent(agent_name)

if agent_name:
span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent_name)

span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)
try:
span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)

_set_tools_on_span(span, tools)
_set_tools_on_span(span, tools)

input = args[0].get("input") if len(args) >= 1 else None
if (
input is not None
and should_send_default_pii()
and integration.include_prompts
):
normalized_messages = normalize_message_roles([input])
scope = sentry_sdk.get_current_scope()
messages_data = truncate_and_annotate_messages(
normalized_messages, span, scope
)
if messages_data is not None:
set_data_normalized(
span,
SPANDATA.GEN_AI_REQUEST_MESSAGES,
messages_data,
unpack=False,
input = args[0].get("input") if len(args) >= 1 else None
if (
input is not None
and should_send_default_pii()
and integration.include_prompts
):
normalized_messages = normalize_message_roles([input])
scope = sentry_sdk.get_current_scope()
messages_data = truncate_and_annotate_messages(
normalized_messages, span, scope
)
if messages_data is not None:
set_data_normalized(
span,
SPANDATA.GEN_AI_REQUEST_MESSAGES,
messages_data,
unpack=False,
)

# Run the agent
result = f(self, *args, **kwargs)
# Run the agent
result = f(self, *args, **kwargs)
except Exception:
span.__exit__(None, None, None)
CURRENT_LANGCHAIN_AGENT_NAME.reset(token)
raise

old_iterator = result

@@ -1107,8 +1074,8 @@ def new_iterator() -> "Iterator[Any]":
raise
finally:
# Ensure cleanup happens even if iterator is abandoned or fails
_pop_agent()
span.__exit__(*exc_info)
CURRENT_LANGCHAIN_AGENT_NAME.reset(token)

async def new_iterator_async() -> "AsyncIterator[Any]":
exc_info: "tuple[Any, Any, Any]" = (None, None, None)
@@ -1133,8 +1100,8 @@ async def new_iterator_async() -> "AsyncIterator[Any]":
raise
finally:
# Ensure cleanup happens even if iterator is abandoned or fails
_pop_agent()
span.__exit__(*exc_info)
[Review comment] Bug: Calling CURRENT_LANGCHAIN_AGENT_NAME.reset(token) in the new_iterator_async generator will raise a ValueError because the token was created in a different, synchronous context.
Severity: HIGH

Suggested fix: The ContextVar token should be set and reset within the same context. Move the CURRENT_LANGCHAIN_AGENT_NAME.set(agent_name) call inside the new_iterator_async generator, so that both setting the variable and resetting it happen within the async generator's context. This ensures the token is valid when reset() is called in the finally block.

Prompt for AI Agent:
Review the code at the location below. A potential bug has been identified by an AI agent. Verify whether this is a real issue. If it is, propose a fix; if not, explain why it's not valid.

Location: sentry_sdk/integrations/langchain.py#L1103

Potential issue: The token for the CURRENT_LANGCHAIN_AGENT_NAME context variable is created in the synchronous function new_stream. However, the new_iterator_async generator, which runs in a separate, copied context, attempts to reset this token in its finally block. According to Python's context-variable specification (PEP 567), calling reset() with a token from a different context raises a ValueError. This will cause any async streaming agent call to crash during cleanup when the generator is exhausted or closed. The synchronous version is unaffected, as it operates within the same context.
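To make the failure mode concrete, a hypothetical, self-contained reproduction (illustrative names, not SDK code). asyncio.run() drives the generator from a Task, and Tasks run in a copy of the current context, so the reset() in the finally block executes in a different Context than the set():

import asyncio
import contextvars

AGENT = contextvars.ContextVar("AGENT", default=None)

def stream():
    token = AGENT.set("my_agent")  # token created in the sync caller's context

    async def iterator():
        try:
            yield "chunk"
        finally:
            # Runs inside the consuming Task's copied context; per PEP 567
            # this is expected to raise:
            # ValueError: <Token ...> was created in a different Context
            AGENT.reset(token)

    return iterator()

async def consume(agen):
    async for _ in agen:
        pass

asyncio.run(consume(stream()))  # crashes during async-generator cleanup

And the comment's suggested fix, sketched: set and reset inside the generator, so both calls happen in the same context:

async def iterator_fixed(agent_name, inner):
    token = AGENT.set(agent_name)  # set inside the generator's own frames
    try:
        async for item in inner:
            yield item
    finally:
        AGENT.reset(token)  # same context as set(), so the reset is valid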

CURRENT_LANGCHAIN_AGENT_NAME.reset(token)

if str(type(result)) == "<class 'async_generator'>":
result = new_iterator_async()
34 changes: 34 additions & 0 deletions sentry_sdk/integrations/pydantic_ai/__init__.py
@@ -16,6 +16,40 @@


class PydanticAIIntegration(Integration):
"""
Integration for Pydantic AI.

This integration instruments Pydantic AI agents to capture traces and errors.

Example:
Install the integration:

.. code-block:: bash

pip install sentry-sdk[pydantic_ai]

Configure the integration:

.. code-block:: python

import sentry_sdk
from sentry_sdk.integrations.pydantic_ai import PydanticAIIntegration

sentry_sdk.init(
dsn="your-dsn",
integrations=[PydanticAIIntegration()],
)

Use Pydantic AI:

.. code-block:: python

from pydantic import BaseModel
from pydantic_ai import Agent

class SupportResponse(BaseModel):
    answer: str

agent = Agent("test", output_type=SupportResponse)
result = await agent.run("Hello")  # inside an async function
print(result.output)
"""
identifier = "pydantic_ai"
origin = f"auto.ai.{identifier}"

40 changes: 40 additions & 0 deletions tests/integrations/langchain/test_langchain.py
@@ -313,6 +313,46 @@ def test_langchain_agent(
)


def test_langchain_agent_name_propagation(sentry_init, capture_events, monkeypatch):
sentry_init(
integrations=[LangchainIntegration(include_prompts=True)],
traces_sample_rate=1.0,
send_default_pii=True,
)
events = capture_events()

prompt = ChatPromptTemplate.from_messages(
[
(
"system",
"You are very powerful assistant, but don't know current events",
),
("user", "{input}"),
MessagesPlaceholder(variable_name="agent_scratchpad"),
]
)

llm = MockOpenAI(model_name="gpt-3.5-turbo", temperature=0, openai_api_key="badkey")
agent = create_openai_tools_agent(llm, [get_word_length], prompt)
agent_executor = AgentExecutor(agent=agent, tools=[get_word_length], verbose=True)

# Mock _get_request_data to return a test agent name
def mock_get_request_data(self, args, kwargs):
return "test_agent_name", [get_word_length]

monkeypatch.setattr(
"sentry_sdk.integrations.langchain._get_request_data", mock_get_request_data
)

with start_transaction():
list(agent_executor.stream({"input": "How many letters in the word eudca"}))

tx = events[0]
assert tx["type"] == "transaction"
for span in tx["spans"]:
assert span["data"].get(SPANDATA.GEN_AI_AGENT_NAME) == "test_agent_name"


def test_langchain_error(sentry_init, capture_events):
sentry_init(
integrations=[LangchainIntegration(include_prompts=True)],