From 42d5f80054e0b9adce3b5046f606d94127a2559f Mon Sep 17 00:00:00 2001
From: fcogidi <41602287+fcogidi@users.noreply.github.com>
Date: Fri, 16 Jan 2026 22:51:25 -0500
Subject: [PATCH 1/2] Refactor LangFuse integration to enable grouping traces
 into sessions

---
 .../1_react_rag/langfuse_gradio.py          | 16 ++--
 src/2_frameworks/2_multi_agent/efficient.py | 34 ++++---
 .../2_multi_agent/efficient_multiple_kbs.py | 33 +++----
 src/2_frameworks/2_multi_agent/fan_out.py   | 76 +++++++++-------
 src/2_frameworks/2_multi_agent/verbose.py   | 89 ++++++++++++-------
 src/2_frameworks/3_code_interpreter/app.py  | 26 ++++--
 src/2_frameworks/4_mcp/app.py               | 16 +++-
 .../2_synthetic_data/synthesize_data.py     |  5 +-
 .../2_synthetic_data/synthesize_data_e2b.py |  4 +-
 src/utils/langfuse/oai_sdk_setup.py         |  2 +
 10 files changed, 190 insertions(+), 111 deletions(-)

diff --git a/src/2_frameworks/1_react_rag/langfuse_gradio.py b/src/2_frameworks/1_react_rag/langfuse_gradio.py
index 156bcab..1acae71 100644
--- a/src/2_frameworks/1_react_rag/langfuse_gradio.py
+++ b/src/2_frameworks/1_react_rag/langfuse_gradio.py
@@ -10,6 +10,7 @@
 import gradio as gr
 from dotenv import load_dotenv
 from gradio.components.chatbot import ChatMessage
+from langfuse import propagate_attributes
 
 from src.prompts import REACT_INSTRUCTIONS
 from src.utils import (
@@ -51,9 +52,14 @@ async def _main(
         ),
     )
 
-    with langfuse_client.start_as_current_span(name="Agents-SDK-Trace") as span:
-        span.update(input=query)
-
+    with (
+        langfuse_client.start_as_current_observation(
+            name="Agents-SDK-Trace", as_type="agent", input=query
+        ) as obs,
+        propagate_attributes(
+            session_id=session.session_id  # Propagate session_id to all child observations
+        ),
+    ):
         # Run the agent in streaming mode to get and display intermediate outputs
         result_stream = agents.Runner.run_streamed(
             main_agent, input=query, session=session
@@ -64,7 +70,7 @@
             if len(turn_messages) > 0:
                 yield turn_messages
 
-        span.update(output=result_stream.final_output)
+        obs.update(output=result_stream.final_output)
 
         pretty_print(turn_messages)
         yield turn_messages
@@ -92,7 +98,7 @@
             [
                 "At which university did the SVP Software Engineering"
                 " at Apple (as of June 2025) earn their engineering degree?",
-            ]
+            ],
         ],
         title="2.1: ReAct for Retrieval-Augmented Generation with OpenAI Agent SDK + LangFuse",
     )
diff --git a/src/2_frameworks/2_multi_agent/efficient.py b/src/2_frameworks/2_multi_agent/efficient.py
index c6880a9..e1ec98d 100644
--- a/src/2_frameworks/2_multi_agent/efficient.py
+++ b/src/2_frameworks/2_multi_agent/efficient.py
@@ -13,6 +13,7 @@
 import gradio as gr
 from dotenv import load_dotenv
 from gradio.components.chatbot import ChatMessage
+from langfuse import propagate_attributes
 
 from src.prompts import REACT_INSTRUCTIONS
 from src.utils import (
@@ -39,12 +40,20 @@ async def _main(
     session = get_or_create_session(history, session_state)
 
     # Use the main agent as the entry point, not the worker agent.
-    with langfuse_client.start_as_current_span(name="Agents-SDK-Trace") as span:
-        span.update(input=query)
-
+    with (
+        langfuse_client.start_as_current_observation(
+            name="Orchestrator-Worker", as_type="agent", input=query
+        ) as obs,
+        propagate_attributes(
+            session_id=session.session_id  # Propagate session_id to all child observations
+        ),
+    ):
         # Run the agent in streaming mode to get and display intermediate outputs
         result_stream = agents.Runner.run_streamed(
-            main_agent, input=query, session=session
+            main_agent,
+            input=query,
+            session=session,
+            max_turns=30,  # Increase max turns to support more complex queries
         )
 
         async for _item in result_stream.stream_events():
@@ -52,7 +61,7 @@
             if len(turn_messages) > 0:
                 yield turn_messages
 
-        span.update(output=result_stream.final_output)
+        obs.update(output=result_stream.final_output)
 
 
 if __name__ == "__main__":
@@ -81,7 +90,11 @@
         instructions=(
             "You are a search agent. You receive a single search query as input. "
             "Use the search tool to perform a search, then produce a concise "
-            "'search summary' of the key findings. Do NOT return raw search results."
+            "'search summary' of the key findings. "
+            "For every fact you include in the summary, ALWAYS include a citation "
+            "both in-line and at the end of the summary as a numbered list. The "
+            "citation at the end should include relevant metadata from the search "
+            "results. Do NOT return raw search results. "
         ),
         tools=[
             agents.function_tool(client_manager.knowledgebase.search_knowledgebase),
@@ -118,12 +131,13 @@
         **COMMON_GRADIO_CONFIG,
         examples=[
             [
-                "At which university did the SVP Software Engineering"
-                " at Apple (as of June 2025) earn their engineering degree?"
+                "Write a structured report on the history of AI, covering: "
+                "1) the start in the 50s, 2) the first AI winter, 3) the second AI winter, "
+                "4) the modern AI boom, 5) the evolution of AI hardware, and "
+                "6) the societal impacts of modern AI"
             ],
             [
-                "How does the annual growth in the 50th-percentile income "
-                "in the US compare with that in Canada?",
+                "Compare the box office performance of 'Oppenheimer' with the third Avatar movie"
             ],
         ],
         title="2.2.2: Multi-Agent Orchestrator-worker for Retrieval-Augmented Generation",
diff --git a/src/2_frameworks/2_multi_agent/efficient_multiple_kbs.py b/src/2_frameworks/2_multi_agent/efficient_multiple_kbs.py
index 5331685..1f3444f 100644
--- a/src/2_frameworks/2_multi_agent/efficient_multiple_kbs.py
+++ b/src/2_frameworks/2_multi_agent/efficient_multiple_kbs.py
@@ -7,6 +7,7 @@
 import gradio as gr
 from dotenv import load_dotenv
 from gradio.components.chatbot import ChatMessage
+from langfuse import propagate_attributes
 
 from src.utils import (
     oai_agent_stream_to_gradio_messages,
@@ -36,9 +37,14 @@ async def _main(
     session = get_or_create_session(history, session_state)
 
     # Use the main agent as the entry point, not the worker agent.
-    with langfuse_client.start_as_current_span(name="Agents-SDK-Trace") as span:
-        span.update(input=query)
-
+    with (
+        langfuse_client.start_as_current_observation(
+            name="Orchestrator-Worker", as_type="agent", input=query
+        ) as obs,
+        propagate_attributes(
+            session_id=session.session_id  # Propagate session_id to all child observations
+        ),
+    ):
         # Run the agent in streaming mode to get and display intermediate outputs
         result_stream = agents.Runner.run_streamed(
             main_agent,
@@ -52,7 +58,7 @@
             if len(turn_messages) > 0:
                 yield turn_messages
 
-        span.update(output=result_stream.final_output)
+        obs.update(output=result_stream.final_output)
 
 
 if __name__ == "__main__":
@@ -173,6 +179,9 @@
         model=agents.OpenAIChatCompletionsModel(
             model=planner_model, openai_client=client_manager.openai_client
         ),
+        # NOTE: enabling parallel tool calls here can sometimes lead to issues
+        # with invalid arguments being passed to the search agent.
+        model_settings=agents.ModelSettings(parallel_tool_calls=False),
     )
 
     demo = gr.ChatInterface(
@@ -179,20 +188,14 @@
         _main,
         **COMMON_GRADIO_CONFIG,
         examples=[
             [
-                "At which university did the SVP Software Engineering"
-                " at Apple (as of June 2025) earn their engineering degree?"
-            ],
-            [
-                "How does the annual growth in the 50th-percentile income "
-                "in the US compare with that in Canada?",
+                "Write a structured report on the history of AI, covering: "
+                "1) the start in the 50s, 2) the first AI winter, 3) the second AI winter, "
+                "4) the modern AI boom, 5) the evolution of AI hardware, and "
+                "6) the societal impacts of modern AI"
             ],
             [
-                "Provide a complete list of all countries that have a population "
-                "over 100 million in 2026, that contain over 500 billion cubic meters "
-                "of internal fresh water for the year 2021, and have a mortality rate "
-                "less than the birth rate for the year 2021. The order of the list "
-                "should be based on the largest population size in 2026."
+                "Compare the box office performance of 'Oppenheimer' with the third Avatar movie"
             ],
         ],
         title="2.2.3: Multi-Agent Orchestrator-worker for Retrieval-Augmented Generation with Multiple Tools",
diff --git a/src/2_frameworks/2_multi_agent/fan_out.py b/src/2_frameworks/2_multi_agent/fan_out.py
index f189bac..b740013 100644
--- a/src/2_frameworks/2_multi_agent/fan_out.py
+++ b/src/2_frameworks/2_multi_agent/fan_out.py
@@ -241,7 +241,9 @@ async def process_document_pair(document_pair: DocumentPair) -> ConflictSummary
 
     Returns None if exception is encountered.
     """
-    with langfuse_client.start_as_current_observation(name="Conflict- suggest") as span:
+    with langfuse_client.start_as_current_observation(
+        name="Conflict - suggest", as_type="agent"
+    ) as obs:
         try:
             result = await agents.Runner.run(
                 worker_agent, input=document_pair.get_prompt()
@@ -251,7 +253,7 @@
             print(e)
             return None
 
-        span.update(input=document_pair, output=output)
+        obs.update(input=document_pair, output=output)
 
     return output
 
@@ -283,7 +285,9 @@ async def process_one_review(
 
     Return None upon error.
""" - with langfuse_client.start_as_current_observation(name="Review proposal") as span: + with langfuse_client.start_as_current_observation( + name="Review proposal", as_type="agent" + ) as obs: try: result = await agents.Runner.run( conflict_review_agent, input=conflicted_document.model_dump_json() @@ -293,7 +297,7 @@ async def process_one_review( print(e) return None - span.update(input=conflicted_document, output=output) + obs.update(input=conflicted_document, output=output) return output @@ -380,33 +384,43 @@ async def process_conflict_reviews( assert isinstance(dataset_dict, datasets.DatasetDict) documents = list(dataset_dict["train"])[: args.num_rows] - # Run O(N^2) agents on N documents to identify pairwise e.g., conflicts. - document_pairs = build_document_pairs(documents) # type: ignore[arg-type] - print(f"Built {len(document_pairs)} pair(s) from {len(documents)} document(s).") - - with langfuse_client.start_as_current_span(name="Conflicts- Pairwise") as span: - flagged_pairs = asyncio.get_event_loop().run_until_complete( - process_fan_out(document_pairs) - ) - span.update( - input=args.source_dataset, output=f"{len(flagged_pairs)} pairs identified." - ) - - # Collect conflicts related to each document. - # from O(N^2) pairs to O(N) summarized per-document conflicts. - conflicted_documents = group_conflicts(flagged_pairs) + with langfuse_client.start_as_current_observation( + name="Fan-Out", as_type="chain", input=args.source_dataset + ) as span: + # Run O(N^2) agents on N documents to identify pairwise e.g., conflicts. + document_pairs = build_document_pairs(documents) # type: ignore[arg-type] + print(f"Built {len(document_pairs)} pair(s) from {len(documents)} document(s).") + + with langfuse_client.start_as_current_observation( + name="Conflicts - Pairwise", as_type="chain" + ) as obs: + flagged_pairs = asyncio.get_event_loop().run_until_complete( + process_fan_out(document_pairs) + ) + obs.update( + input=args.source_dataset, + output=f"{len(flagged_pairs)} pairs identified.", + ) - # Review these O(N) per-document conflicts. - with langfuse_client.start_as_current_span(name="Conflicts- Review") as span: - conflict_reviews: list[ReviewedDocument] = ( - asyncio.get_event_loop().run_until_complete( - process_conflict_reviews(conflicted_documents) + # Collect conflicts related to each document. + # from O(N^2) pairs to O(N) summarized per-document conflicts. + conflicted_documents = group_conflicts(flagged_pairs) + + # Review these O(N) per-document conflicts. 
+        with langfuse_client.start_as_current_observation(
+            name="Conflicts - Review", as_type="chain"
+        ) as obs:
+            conflict_reviews: list[ReviewedDocument] = (
+                asyncio.get_event_loop().run_until_complete(
+                    process_conflict_reviews(conflicted_documents)
+                )
             )
-        )
-        span.update(input=conflicted_documents, output=conflict_reviews)
+            obs.update(input=conflicted_documents, output=conflict_reviews)
+
+        # Generate markdown output
+        with open(args.output_report, "w") as output_file:
+            reports = [_review.get_report() for _review in conflict_reviews]
+            output_file.write("\n".join(reports))
+            print(f"Writing report to {args.output_report}.")
 
-    # Generate markdown output
-    with open(args.output_report, "w") as output_file:
-        reports = [_review.get_report() for _review in conflict_reviews]
-        output_file.write("\n".join(reports))
-        print(f"Writing report to {args.output_report}.")
+        span.update(output="Wrote report to " + args.output_report)
diff --git a/src/2_frameworks/2_multi_agent/verbose.py b/src/2_frameworks/2_multi_agent/verbose.py
index 16e3aa4..29d426d 100644
--- a/src/2_frameworks/2_multi_agent/verbose.py
+++ b/src/2_frameworks/2_multi_agent/verbose.py
@@ -1,4 +1,4 @@
-"""Multi-agent Planner-Researcher Setup via OpenAI Agents SDK.
+"""Multi-agent Plan-and-Execute workflow via OpenAI Agents SDK.
 
 Note: this implementation does not unlock the full potential and flexibility
 of LLM agents. Use this reference implementation only if your use case requires
@@ -14,6 +14,7 @@
 import gradio as gr
 from dotenv import load_dotenv
 from gradio.components.chatbot import ChatMessage
+from langfuse import propagate_attributes
 from pydantic import BaseModel
 
 from src.utils import (
@@ -34,21 +35,32 @@
 relevant information from a knowledge base to answer the question. \
 As you are not able to clarify from the user what they are looking for, \
 your search terms should be broad and cover various aspects of the query. \
-Output between 5 to 10 search terms to query the knowledge base. \
+Output up to 10 search terms to query the knowledge base. \
 Note that the knowledge base is a Wikipedia dump and cuts off at May 2025.
 """
 
 RESEARCHER_INSTRUCTIONS = """\
 You are a research assistant with access to a knowledge base. \
-Given a potentially broad search term, use the search tool to \
-retrieve relevant information from the knowledge base and produce a short
-summary of at most 300 words.
+Given a potentially broad search term, your task is to use the search tool to \
+retrieve relevant information from the knowledge base and produce a short \
+summary of at most 300 words. You must pass the initial search term directly to \
+the search tool without any modifications and, only if necessary, refine your \
+search based on the results you get back. Your summary must be based solely on \
+a synthesis of all the search results and should not include any information that \
+is not present in the search results. For every fact you include in the summary, \
+ALWAYS include a citation both in-line and at the end of the summary as a numbered \
+list. The citation at the end should include relevant metadata from the search \
+results. Do NOT return raw search results.
 """
 
 WRITER_INSTRUCTIONS = """\
 You are an expert at synthesizing information and writing coherent reports. \
 Given a user's query and a set of search summaries, synthesize these into a \
-coherent report (at least a few paragraphs long) that answers the user's question. \
+coherent report that answers the user's question. The length of the report should be \
+proportional to the complexity of the question. For queries that are more complex, \
+ensure that the report is well-structured, with clear sections and headings where \
+appropriate. Make sure to use the citations from the search summaries to back up \
+any factual claims you make. \
 Do not make up any information outside of the search summaries.
 """
 
@@ -90,14 +102,8 @@ async def _create_search_plan(
     planner_agent: agents.Agent, query: str, session: agents.Session | None = None
 ) -> SearchPlan:
     """Create a search plan using the planner agent."""
-    with langfuse_client.start_as_current_span(
-        name="create_search_plan", input=query
-    ) as planner_span:
-        response = await agents.Runner.run(planner_agent, input=query, session=session)
-        search_plan = response.final_output_as(SearchPlan)
-        planner_span.update(output=search_plan)
-
-    return search_plan
+    response = await agents.Runner.run(planner_agent, input=query, session=session)
+    return response.final_output_as(SearchPlan)
 
 
 async def _generate_final_report(
@@ -112,15 +118,14 @@
         f"{i + 1}. {result}" for i, result in enumerate(search_results)
     )
 
-    with langfuse_client.start_as_current_span(
-        name="generate_final_report", input=input_data
-    ) as writer_span:
+    with langfuse_client.start_as_current_observation(
+        name="Writer-Agent", as_type="chain", input=input_data
+    ) as obs:
         response = await agents.Runner.run(
             writer_agent, input=input_data, session=session
         )
-        writer_span.update(output=response.final_output)
-
-    return response
+        obs.update(output=response.final_output)
+    return response
 
 
 async def _main(
@@ -136,11 +141,23 @@
     # previous turns in the conversation
     session = get_or_create_session(history, session_state)
 
-    with langfuse_client.start_as_current_span(
-        name="Multi-Agent-Trace", input=query
-    ) as agents_span:
+    with (
+        langfuse_client.start_as_current_observation(
+            name="Plan-and-Execute-Workflow", as_type="chain", input=query
+        ) as obs,
+        propagate_attributes(
+            session_id=session.session_id  # Propagate session_id to all child observations
+        ),
+    ):
         # Create a search plan
-        search_plan = await _create_search_plan(planner_agent, query, session=session)
+        with langfuse_client.start_as_current_observation(
+            name="Planner-Agent", as_type="chain", input=query
+        ) as planner_obs:
+            search_plan = await _create_search_plan(
+                planner_agent, query, session=session
+            )
+            planner_obs.update(output=str(search_plan))
+
         turn_messages.append(
             ChatMessage(
                 role="assistant",
@@ -168,14 +185,17 @@
         # TODO: As an exercise, try to parallelize the execution of the search steps.
         search_results = []
         for step in search_plan.search_steps:
-            with langfuse_client.start_as_current_span(
-                name="execute_search_step", input=step.search_term
-            ) as search_span:
+            with langfuse_client.start_as_current_observation(
+                name="Researcher-Agent", as_type="chain", input=step.search_term
+            ) as researcher_obs:
                 response = await agents.Runner.run(
-                    research_agent, input=step.search_term, session=session
+                    research_agent,
+                    input=step.search_term,
+                    session=session,
+                    max_turns=30,  # Allow more turns for complex searches
                 )
                 search_result: str = response.final_output
-                search_span.update(output=search_result)
+                researcher_obs.update(output=search_result)
 
             search_results.append(search_result)
             turn_messages += oai_agent_items_to_gradio_messages(
@@ -187,9 +207,9 @@
         writer_agent_response = await _generate_final_report(
             writer_agent, search_results, query, session=session
         )
-        agents_span.update(output=writer_agent_response.final_output)
 
         report = writer_agent_response.final_output_as(ResearchReport)
+        obs.update(output=report)
         turn_messages.append(
             ChatMessage(
                 role="assistant",
@@ -257,12 +277,13 @@
         **COMMON_GRADIO_CONFIG,
         examples=[
             [
-                "At which university did the SVP Software Engineering"
-                " at Apple (as of June 2025) earn their engineering degree?"
+                "Write a structured report on the history of AI, covering: "
+                "1) the start in the 50s, 2) the first AI winter, 3) the second AI winter, "
+                "4) the modern AI boom, 5) the evolution of AI hardware, and "
+                "6) the societal impacts of modern AI"
             ],
             [
-                "How does the annual growth in the 50th-percentile income "
-                "in the US compare with that in Canada?",
+                "Compare the box office performance of 'Oppenheimer' with the third Avatar movie"
             ],
         ],
         title="2.2.1: Plan-and-Execute Multi-Agent System for Retrieval-Augmented Generation",
diff --git a/src/2_frameworks/3_code_interpreter/app.py b/src/2_frameworks/3_code_interpreter/app.py
index 0a97522..893b0a7 100644
--- a/src/2_frameworks/3_code_interpreter/app.py
+++ b/src/2_frameworks/3_code_interpreter/app.py
@@ -13,17 +13,19 @@
 import gradio as gr
 from dotenv import load_dotenv
 from gradio.components.chatbot import ChatMessage
+from langfuse import propagate_attributes
 
 from src.utils import (
     CodeInterpreter,
     oai_agent_stream_to_gradio_messages,
-    pretty_print,
     set_up_logging,
 )
 from src.utils.agent_session import get_or_create_session
 from src.utils.client_manager import AsyncClientManager
 from src.utils.gradio import COMMON_GRADIO_CONFIG
+from src.utils.langfuse.oai_sdk_setup import setup_langfuse_tracer
 from src.utils.langfuse.shared_client import langfuse_client
+from src.utils.pretty_printing import pretty_print
 
 
 CODE_INTERPRETER_INSTRUCTIONS = """\
@@ -35,7 +37,10 @@
 Instead of asking the user for file inputs, you should try to find the file \
 using this tool.
 
-Recommended packages: Pandas, Numpy, SymPy, Scikit-learn.
+Recommended packages: Pandas, Numpy, SymPy, Scikit-learn, Matplotlib, Seaborn.
+
+Use Matplotlib to create visualizations. Make sure to call `plt.show()` so that
+the plot is captured and returned to the user.
 
 You can also run Jupyter-style shell commands (e.g., `!pip freeze`)
 but you won't be able to install packages.
@@ -54,9 +59,14 @@
     # previous turns in the conversation
     session = get_or_create_session(history, session_state)
 
-    with langfuse_client.start_as_current_span(name="Agents-SDK-Trace") as span:
-        span.update(input=query)
-
+    with (
+        langfuse_client.start_as_current_observation(
+            name="Code-Interpreter-Agent", as_type="agent", input=query
+        ) as obs,
+        propagate_attributes(
+            session_id=session.session_id  # Propagate session_id to all child observations
+        ),
+    ):
         # Run the agent in streaming mode to get and display intermediate outputs
         result_stream = agents.Runner.run_streamed(
             main_agent, input=query, session=session
@@ -67,7 +77,7 @@
             if len(turn_messages) > 0:
                 yield turn_messages
 
-        span.update(output=result_stream.final_output)
+        obs.update(output=result_stream.final_output)
 
         pretty_print(turn_messages)
         yield turn_messages
@@ -80,6 +90,7 @@
 
     load_dotenv(verbose=True)
     set_up_logging()
+    setup_langfuse_tracer()
 
     # Initialize client manager
     # This class initializes the OpenAI and Weaviate async clients, as well as the
@@ -100,8 +111,7 @@
         instructions=CODE_INTERPRETER_INSTRUCTIONS,
         tools=[
             agents.function_tool(
-                code_interpreter.run_code,
-                name_override="code_interpreter",
+                code_interpreter.run_code, name_override="code_interpreter"
             )
         ],
         model=agents.OpenAIChatCompletionsModel(
diff --git a/src/2_frameworks/4_mcp/app.py b/src/2_frameworks/4_mcp/app.py
index 0f4a92f..d3e71b9 100644
--- a/src/2_frameworks/4_mcp/app.py
+++ b/src/2_frameworks/4_mcp/app.py
@@ -12,6 +12,7 @@
 from agents.mcp import MCPServerStdio, create_static_tool_filter
 from dotenv import load_dotenv
 from gradio.components.chatbot import ChatMessage
+from langfuse import propagate_attributes
 
 from src.utils import (
     oai_agent_stream_to_gradio_messages,
@@ -21,6 +22,7 @@
 from src.utils.agent_session import get_or_create_session
 from src.utils.client_manager import AsyncClientManager
 from src.utils.gradio import COMMON_GRADIO_CONFIG
+from src.utils.langfuse.oai_sdk_setup import setup_langfuse_tracer
 from src.utils.langfuse.shared_client import langfuse_client
 
 
@@ -43,9 +45,14 @@
         ["git", "rev-parse", "--show-toplevel"], text=True
     ).strip()
 
-    with langfuse_client.start_as_current_span(name="Agents-SDK-Trace") as span:
-        span.update(input=query)
-
+    with (
+        langfuse_client.start_as_current_observation(
+            name="Git-Agent", as_type="agent", input=query
+        ) as obs,
+        propagate_attributes(
+            session_id=session.session_id  # Propagate session_id to all child observations
+        ),
+    ):
         async with MCPServerStdio(
             name="Git server",
             params={
@@ -74,7 +81,7 @@
             if len(turn_messages) > 0:
                 yield turn_messages
 
-        span.update(output=result_stream.final_output)
+        obs.update(output=result_stream.final_output)
 
         pretty_print(turn_messages)
         yield turn_messages
@@ -87,6 +94,7 @@
 
     load_dotenv(verbose=True)
     set_up_logging()
+    setup_langfuse_tracer()
 
     # Initialize client manager
     # This class initializes the OpenAI and Weaviate async clients, as well as the
diff --git a/src/3_evals/2_synthetic_data/synthesize_data.py b/src/3_evals/2_synthetic_data/synthesize_data.py
index 74a9799..fe02a37 100644
--- a/src/3_evals/2_synthetic_data/synthesize_data.py
+++ b/src/3_evals/2_synthetic_data/synthesize_data.py
@@ -95,13 +95,14 @@ async def generate_synthetic_test_cases(
         ),
     )
 
-    with langfuse_client.start_as_current_span(name="generate_synthetic_test_cases"):
+    with langfuse_client.start_as_current_observation(
+        name="generate_synthetic_test_cases", as_type="agent"
+    ):
         raw_response = await agents.Runner.run(
             test_case_generator_agent,
             input="Generate test question-answer pairs based on this news event: \n"
             + news_event.model_dump_json(indent=2),
         )
-        print(raw_response.final_output)
         structured_response = await agents.Runner.run(
             structured_output_agent,
             input=raw_response.final_output,
diff --git a/src/3_evals/2_synthetic_data/synthesize_data_e2b.py b/src/3_evals/2_synthetic_data/synthesize_data_e2b.py
index d01f528..1f1d5dd 100644
--- a/src/3_evals/2_synthetic_data/synthesize_data_e2b.py
+++ b/src/3_evals/2_synthetic_data/synthesize_data_e2b.py
@@ -93,8 +93,8 @@ async def generate_synthetic_test_cases(
         ),
     )
 
-    with langfuse_client.start_as_current_span(
-        name="generate_synthetic_test_cases"
+    with langfuse_client.start_as_current_observation(
+        name="generate_synthetic_test_cases", as_type="agent"
     ):
         raw_response = await agents.Runner.run(
             test_case_generator_agent,
diff --git a/src/utils/langfuse/oai_sdk_setup.py b/src/utils/langfuse/oai_sdk_setup.py
index 7458206..8432cc3 100644
--- a/src/utils/langfuse/oai_sdk_setup.py
+++ b/src/utils/langfuse/oai_sdk_setup.py
@@ -5,6 +5,7 @@
 """
 
 import logfire
+import nest_asyncio
 from opentelemetry import trace
 from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter
 from opentelemetry.sdk.trace import TracerProvider
@@ -15,6 +16,7 @@
 
 def configure_oai_agents_sdk(service_name: str) -> None:
     """Register Langfuse as tracing provider for OAI Agents SDK."""
+    nest_asyncio.apply()
     logfire.configure(service_name=service_name, send_to_logfire=False, scrubbing=False)
     logfire.instrument_openai_agents()
 

From 9c839d48cf687be34191a19d9e3eb898440ef7f4 Mon Sep 17 00:00:00 2001
From: fcogidi <41602287+fcogidi@users.noreply.github.com>
Date: Fri, 16 Jan 2026 22:52:05 -0500
Subject: [PATCH 2/2] Update pyasn1 to version 0.6.2 and update associated
 URLs in uv.lock

---
 uv.lock | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/uv.lock b/uv.lock
index 0f3b160..77a816c 100644
--- a/uv.lock
+++ b/uv.lock
@@ -3763,11 +3763,11 @@ wheels = [
 
 [[package]]
 name = "pyasn1"
-version = "0.6.1"
+version = "0.6.2"
 source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/ba/e9/01f1a64245b89f039897cb0130016d79f77d52669aae6ee7b159a6c4c018/pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034", size = 145322, upload-time = "2024-09-10T22:41:42.55Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/fe/b6/6e630dff89739fcd427e3f72b3d905ce0acb85a45d4ec3e2678718a3487f/pyasn1-0.6.2.tar.gz", hash = "sha256:9b59a2b25ba7e4f8197db7686c09fb33e658b98339fadb826e9512629017833b", size = 146586, upload-time = "2026-01-16T18:04:18.534Z" }
 wheels = [
-    { url = "https://files.pythonhosted.org/packages/c8/f1/d6a797abb14f6283c0ddff96bbdd46937f64122b8c925cab503dd37f8214/pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", size = 83135, upload-time = "2024-09-11T16:00:36.122Z" },
+    { url = "https://files.pythonhosted.org/packages/44/b5/a96872e5184f354da9c84ae119971a0a4c221fe9b27a4d94bd43f2596727/pyasn1-0.6.2-py3-none-any.whl", hash = "sha256:1eb26d860996a18e9b6ed05e7aae0e9fc21619fcee6af91cca9bad4fbea224bf", size = 83371, upload-time = "2026-01-16T18:04:17.174Z" },
 ]
 
 [[package]]