Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
37 changes: 37 additions & 0 deletions sentry_sdk/ai/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -747,3 +747,40 @@ def set_conversation_id(conversation_id: str) -> None:
"""
scope = sentry_sdk.get_current_scope()
scope.set_conversation_id(conversation_id)


import contextvars

# Per-context stack of agent names for nested agent invocations.
# A ContextVar (rather than a plain module global) is used so each
# asyncio task / thread-local execution context sees its own stack,
# and entries pushed in a child context do not leak into the parent.
# The default is None (no stack yet) instead of a shared mutable list.
_gen_ai_agent_stack: "contextvars.ContextVar[Optional[list[Optional[str]]]]" = (
    contextvars.ContextVar("gen_ai_agent_stack", default=None)
)


def push_agent_name(agent_name: "Optional[str]") -> None:
    """Push *agent_name* onto the per-context agent-name stack.

    The stack is never mutated in place: a fresh list is stored via
    ``ContextVar.set`` so that outer execution contexts keep seeing
    their own, unmodified stack (copy-on-write).
    """
    current = _gen_ai_agent_stack.get()
    updated = [] if current is None else list(current)
    updated.append(agent_name)
    _gen_ai_agent_stack.set(updated)


def pop_agent_name() -> "Optional[str]":
    """Remove and return the top entry of the agent-name stack.

    Returns ``None`` when the stack is unset or empty. The popped
    result is written back as a new list (copy-on-write) so outer
    contexts are unaffected.
    """
    current = _gen_ai_agent_stack.get()
    if not current:
        return None
    # Split into everything-but-last and the last element.
    remaining, top = current[:-1], current[-1]
    _gen_ai_agent_stack.set(remaining)
    return top


def get_current_agent_name() -> "Optional[str]":
    """Return the agent name at the top of the stack without popping it.

    Returns ``None`` when no agent name has been pushed in this context.
    """
    current = _gen_ai_agent_stack.get()
    return current[-1] if current else None
1 change: 1 addition & 0 deletions sentry_sdk/integrations/langchain.py
Original file line number Diff line number Diff line change
Expand Up @@ -650,6 +650,7 @@ def on_tool_start(

span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "execute_tool")
span.set_data(SPANDATA.GEN_AI_TOOL_NAME, tool_name)
span.set_data(SPANDATA.GEN_AI_TOOL_TYPE, "function")

tool_description = serialized.get("description")
if tool_description is not None:
Expand Down
122 changes: 65 additions & 57 deletions sentry_sdk/integrations/langgraph.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,8 @@
set_data_normalized,
normalize_message_roles,
truncate_and_annotate_messages,
push_agent_name,
pop_agent_name,
)
from sentry_sdk.consts import OP, SPANDATA
from sentry_sdk.integrations import DidNotEnable, Integration
Expand Down Expand Up @@ -168,36 +170,39 @@ def new_invoke(self: "Any", *args: "Any", **kwargs: "Any") -> "Any":
if graph_name:
span.set_data(SPANDATA.GEN_AI_PIPELINE_NAME, graph_name)
span.set_data(SPANDATA.GEN_AI_AGENT_NAME, graph_name)

span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")

# Store input messages to later compare with output
input_messages = None
if (
len(args) > 0
and should_send_default_pii()
and integration.include_prompts
):
input_messages = _parse_langgraph_messages(args[0])
if input_messages:
normalized_input_messages = normalize_message_roles(input_messages)
scope = sentry_sdk.get_current_scope()
messages_data = truncate_and_annotate_messages(
normalized_input_messages, span, scope
)
if messages_data is not None:
set_data_normalized(
span,
SPANDATA.GEN_AI_REQUEST_MESSAGES,
messages_data,
unpack=False,
push_agent_name(graph_name)

try:
span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")

# Store input messages to later compare with output
input_messages = None
if (
len(args) > 0
and should_send_default_pii()
and integration.include_prompts
):
input_messages = _parse_langgraph_messages(args[0])
if input_messages:
normalized_input_messages = normalize_message_roles(input_messages)
scope = sentry_sdk.get_current_scope()
messages_data = truncate_and_annotate_messages(
normalized_input_messages, span, scope
)

result = f(self, *args, **kwargs)

_set_response_attributes(span, input_messages, result, integration)

return result
if messages_data is not None:
set_data_normalized(
span,
SPANDATA.GEN_AI_REQUEST_MESSAGES,
messages_data,
unpack=False,
)

result = f(self, *args, **kwargs)
_set_response_attributes(span, input_messages, result, integration)
return result
finally:
if graph_name:
pop_agent_name()

return new_invoke

Expand All @@ -222,35 +227,38 @@ async def new_ainvoke(self: "Any", *args: "Any", **kwargs: "Any") -> "Any":
if graph_name:
span.set_data(SPANDATA.GEN_AI_PIPELINE_NAME, graph_name)
span.set_data(SPANDATA.GEN_AI_AGENT_NAME, graph_name)

span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")

input_messages = None
if (
len(args) > 0
and should_send_default_pii()
and integration.include_prompts
):
input_messages = _parse_langgraph_messages(args[0])
if input_messages:
normalized_input_messages = normalize_message_roles(input_messages)
scope = sentry_sdk.get_current_scope()
messages_data = truncate_and_annotate_messages(
normalized_input_messages, span, scope
)
if messages_data is not None:
set_data_normalized(
span,
SPANDATA.GEN_AI_REQUEST_MESSAGES,
messages_data,
unpack=False,
push_agent_name(graph_name)

try:
span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")

input_messages = None
if (
len(args) > 0
and should_send_default_pii()
and integration.include_prompts
):
input_messages = _parse_langgraph_messages(args[0])
if input_messages:
normalized_input_messages = normalize_message_roles(input_messages)
scope = sentry_sdk.get_current_scope()
messages_data = truncate_and_annotate_messages(
normalized_input_messages, span, scope
)

result = await f(self, *args, **kwargs)

_set_response_attributes(span, input_messages, result, integration)

return result
if messages_data is not None:
set_data_normalized(
span,
SPANDATA.GEN_AI_REQUEST_MESSAGES,
messages_data,
unpack=False,
)

result = await f(self, *args, **kwargs)
_set_response_attributes(span, input_messages, result, integration)
return result
finally:
if graph_name:
pop_agent_name()

return new_ainvoke

Expand Down
5 changes: 5 additions & 0 deletions sentry_sdk/integrations/openai.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
normalize_message_roles,
truncate_and_annotate_messages,
truncate_and_annotate_embedding_inputs,
get_current_agent_name,
)
from sentry_sdk.ai._openai_completions_api import (
_is_system_instruction as _is_system_instruction_completions,
Expand Down Expand Up @@ -226,6 +227,10 @@ def _commmon_set_input_data(
# Input attributes: Common
set_data_normalized(span, SPANDATA.GEN_AI_SYSTEM, "openai")

agent_name = get_current_agent_name()
if agent_name:
span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent_name)

# Input attributes: Optional
kwargs_keys_to_attributes = {
"model": SPANDATA.GEN_AI_REQUEST_MODEL,
Expand Down
76 changes: 2 additions & 74 deletions sentry_sdk/integrations/pydantic_ai/spans/ai_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,9 +20,8 @@
get_is_streaming,
)
from .utils import (
_serialize_binary_content_item,
_serialize_image_url_item,
_set_usage_data,
_format_messages,
)

from typing import TYPE_CHECKING
Expand All @@ -35,24 +34,16 @@
try:
from pydantic_ai.messages import (
BaseToolCallPart,
BaseToolReturnPart,
SystemPromptPart,
UserPromptPart,
TextPart,
ThinkingPart,
BinaryContent,
ImageUrl,
)
except ImportError:
# Fallback if these classes are not available
BaseToolCallPart = None
BaseToolReturnPart = None
SystemPromptPart = None
UserPromptPart = None
TextPart = None
ThinkingPart = None
BinaryContent = None
ImageUrl = None


def _transform_system_instructions(
Expand Down Expand Up @@ -116,70 +107,7 @@ def _set_input_messages(span: "sentry_sdk.tracing.Span", messages: "Any") -> Non
)

try:
formatted_messages = []

for msg in messages:
if hasattr(msg, "parts"):
for part in msg.parts:
role = "user"
# Use isinstance checks with proper base classes
if SystemPromptPart and isinstance(part, SystemPromptPart):
continue
elif (
(TextPart and isinstance(part, TextPart))
or (ThinkingPart and isinstance(part, ThinkingPart))
or (BaseToolCallPart and isinstance(part, BaseToolCallPart))
):
role = "assistant"
elif BaseToolReturnPart and isinstance(part, BaseToolReturnPart):
role = "tool"

content: "List[Dict[str, Any] | str]" = []
tool_calls = None
tool_call_id = None

# Handle ToolCallPart (assistant requesting tool use)
if BaseToolCallPart and isinstance(part, BaseToolCallPart):
tool_call_data = {}
if hasattr(part, "tool_name"):
tool_call_data["name"] = part.tool_name
if hasattr(part, "args"):
tool_call_data["arguments"] = safe_serialize(part.args)
if tool_call_data:
tool_calls = [tool_call_data]
# Handle ToolReturnPart (tool result)
elif BaseToolReturnPart and isinstance(part, BaseToolReturnPart):
if hasattr(part, "tool_name"):
tool_call_id = part.tool_name
if hasattr(part, "content"):
content.append({"type": "text", "text": str(part.content)})
# Handle regular content
elif hasattr(part, "content"):
if isinstance(part.content, str):
content.append({"type": "text", "text": part.content})
elif isinstance(part.content, list):
for item in part.content:
if isinstance(item, str):
content.append({"type": "text", "text": item})
elif ImageUrl and isinstance(item, ImageUrl):
content.append(_serialize_image_url_item(item))
elif BinaryContent and isinstance(item, BinaryContent):
content.append(_serialize_binary_content_item(item))
else:
content.append(safe_serialize(item))
else:
content.append({"type": "text", "text": str(part.content)})
# Add message if we have content or tool calls
if content or tool_calls:
message: "Dict[str, Any]" = {"role": role}
if content:
message["content"] = content
if tool_calls:
message["tool_calls"] = tool_calls
if tool_call_id:
message["tool_call_id"] = tool_call_id
formatted_messages.append(message)

formatted_messages = _format_messages(messages)
if formatted_messages:
normalized_messages = normalize_message_roles(formatted_messages)
scope = sentry_sdk.get_current_scope()
Expand Down
28 changes: 24 additions & 4 deletions sentry_sdk/integrations/pydantic_ai/spans/invoke_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
_serialize_binary_content_item,
_serialize_image_url_item,
_set_usage_data,
_format_messages,
)

from typing import TYPE_CHECKING
Expand Down Expand Up @@ -142,10 +143,29 @@ def update_invoke_agent_span(span: "sentry_sdk.tracing.Span", result: "Any") ->
output = getattr(result, "output", None)

# Set response text if prompts are enabled
if _should_send_prompts() and output:
set_data_normalized(
span, SPANDATA.GEN_AI_RESPONSE_TEXT, str(output), unpack=False
)
if _should_send_prompts():
messages = None
if hasattr(result, "new_messages") and callable(result.new_messages):
try:
messages = result.new_messages()
except Exception:
pass
elif hasattr(result, "all_messages") and callable(result.all_messages):
try:
messages = result.all_messages()
except Exception:
pass

formatted_messages = _format_messages(messages) if messages else []

if formatted_messages:
set_data_normalized(
span, SPANDATA.GEN_AI_RESPONSE_TEXT, formatted_messages, unpack=False
)
elif output:
set_data_normalized(
span, SPANDATA.GEN_AI_RESPONSE_TEXT, str(output), unpack=False
)

# Set token usage data if available
if hasattr(result, "usage") and callable(result.usage):
Expand Down
Loading
Loading