Skip to content
Draft
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
122 changes: 6 additions & 116 deletions tests/integrations/langchain/test_langchain.py
Original file line number Diff line number Diff line change
Expand Up @@ -214,11 +214,17 @@ def test_langchain_agent(

tx = events[0]
assert tx["type"] == "transaction"
assert tx["contexts"]["trace"]["origin"] == "manual"

chat_spans = list(x for x in tx["spans"] if x["op"] == "gen_ai.chat")
tool_exec_span = next(x for x in tx["spans"] if x["op"] == "gen_ai.execute_tool")

assert len(chat_spans) == 2

assert chat_spans[0]["origin"] == "auto.ai.langchain"
assert chat_spans[1]["origin"] == "auto.ai.langchain"
assert tool_exec_span["origin"] == "auto.ai.langchain"

# We can't guarantee anything about the "shape" of the langchain execution graph
assert len(list(x for x in tx["spans"] if x["op"] == "gen_ai.chat")) > 0

Expand Down Expand Up @@ -389,122 +395,6 @@ def test_span_status_error(sentry_init, capture_events):
assert transaction["contexts"]["trace"]["status"] == "internal_error"


def test_span_origin(sentry_init, capture_events):
    """Verify span ``origin`` attribution for the LangChain integration.

    Runs a mocked OpenAI tools agent inside a manually started transaction
    and asserts that:
      * the transaction's trace origin is ``"manual"`` (user-started), and
      * every child span created by the integration carries the origin
        ``"auto.ai.langchain"``.

    NOTE(review): this test is being deleted in the surrounding diff; its
    origin assertions appear to have been folded into ``test_langchain_agent``.
    """
    # Initialize the SDK with only the LangChain integration; 100% trace
    # sampling so the transaction is always captured.
    sentry_init(
        integrations=[LangchainIntegration()],
        traces_sample_rate=1.0,
    )
    events = capture_events()

    # Standard tools-agent prompt: system + user message plus the scratchpad
    # placeholder that AgentExecutor fills with intermediate tool results.
    prompt = ChatPromptTemplate.from_messages(
        [
            (
                "system",
                "You are very powerful assistant, but don't know current events",
            ),
            ("user", "{input}"),
            MessagesPlaceholder(variable_name="agent_scratchpad"),
        ]
    )
    # Patch the module-level mock that MockOpenAI's streaming path reads
    # (presumably wired up elsewhere in this test module — not visible here).
    # Two streamed "responses": first a tool call to get_word_length built up
    # across three chunks, then the final natural-language answer.
    global stream_result_mock
    stream_result_mock = Mock(
        side_effect=[
            # Response 1: the model "decides" to call the tool. The tool call
            # arrives incrementally, mirroring real OpenAI streaming:
            [
                # Chunk 1: tool-call envelope (id + function name, empty args).
                ChatGenerationChunk(
                    type="ChatGenerationChunk",
                    message=AIMessageChunk(
                        content="",
                        additional_kwargs={
                            "tool_calls": [
                                {
                                    "index": 0,
                                    "id": "call_BbeyNhCKa6kYLYzrD40NGm3b",
                                    "function": {
                                        "arguments": "",
                                        "name": "get_word_length",
                                    },
                                    "type": "function",
                                }
                            ]
                        },
                    ),
                ),
                # Chunk 2: the JSON arguments for the same tool call
                # (id/name/type are None — continuation of index 0).
                ChatGenerationChunk(
                    type="ChatGenerationChunk",
                    message=AIMessageChunk(
                        content="",
                        additional_kwargs={
                            "tool_calls": [
                                {
                                    "index": 0,
                                    "id": None,
                                    "function": {
                                        "arguments": '{"word": "eudca"}',
                                        "name": None,
                                    },
                                    "type": None,
                                }
                            ]
                        },
                    ),
                ),
                # Chunk 3: trailing content plus token-usage metadata and the
                # finish reason that ends this generation.
                ChatGenerationChunk(
                    type="ChatGenerationChunk",
                    message=AIMessageChunk(
                        content="5",
                        usage_metadata={
                            "input_tokens": 142,
                            "output_tokens": 50,
                            "total_tokens": 192,
                            "input_token_details": {"audio": 0, "cache_read": 0},
                            "output_token_details": {"audio": 0, "reasoning": 0},
                        },
                    ),
                    generation_info={"finish_reason": "function_call"},
                ),
            ],
            # Response 2: final answer after the tool result is fed back.
            [
                ChatGenerationChunk(
                    text="The word eudca has 5 letters.",
                    type="ChatGenerationChunk",
                    message=AIMessageChunk(
                        content="The word eudca has 5 letters.",
                        usage_metadata={
                            "input_tokens": 89,
                            "output_tokens": 28,
                            "total_tokens": 117,
                            "input_token_details": {"audio": 0, "cache_read": 0},
                            "output_token_details": {"audio": 0, "reasoning": 0},
                        },
                    ),
                ),
                # Empty terminal chunk carrying only the "stop" finish reason.
                ChatGenerationChunk(
                    type="ChatGenerationChunk",
                    generation_info={"finish_reason": "stop"},
                    message=AIMessageChunk(content=""),
                ),
            ],
        ]
    )
    # The API key is intentionally bogus: MockOpenAI never hits the network,
    # it replays stream_result_mock instead.
    llm = MockOpenAI(
        model_name="gpt-3.5-turbo",
        temperature=0,
        openai_api_key="badkey",
    )
    agent = create_openai_tools_agent(llm, [get_word_length], prompt)

    agent_executor = AgentExecutor(agent=agent, tools=[get_word_length], verbose=True)

    # Drive the agent inside a user-started transaction; list() drains the
    # stream so all spans are created and finished.
    with start_transaction():
        list(agent_executor.stream({"input": "How many letters in the word eudca"}))

    # Exactly one event (the transaction) is expected.
    (event,) = events

    # The transaction itself was started manually by the test; every span the
    # integration created must be tagged with the LangChain auto-origin.
    assert event["contexts"]["trace"]["origin"] == "manual"
    for span in event["spans"]:
        assert span["origin"] == "auto.ai.langchain"


def test_manual_callback_no_duplication(sentry_init):
"""
Test that when a user manually provides a SentryLangchainCallback,
Expand Down
Loading