diff --git a/.github/workflows/code_checks.yml b/.github/workflows/code_checks.yml index 20a79fa..9eb62d1 100644 --- a/.github/workflows/code_checks.yml +++ b/.github/workflows/code_checks.yml @@ -56,4 +56,4 @@ jobs: with: virtual-environment: .venv/ ignore-vulns: | - GHSA-2qfp-q593-8484 + GHSA-xm59-rqc7-hhvf diff --git a/pyproject.toml b/pyproject.toml index 30e8c8a..c1a1a4e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -9,19 +9,18 @@ requires-python = ">=3.12" dependencies = [ "aiohttp>=3.12.14", "beautifulsoup4>=4.13.4", - "datasets>=3.6.0", - "e2b-code-interpreter>=1.5.2", - "gradio>=5.37.0", - "langfuse>=3.1.3", + "datasets>=4.4.0", + "e2b-code-interpreter>=2.3.0", + "gradio>=6.1.0", + "langfuse>=3.9.0", "lxml>=6.0.0", "nest-asyncio>=1.6.0", "numpy<2.3.0", - "openai>=1.93.1", - "openai-agents>=0.1.0", + "openai>=2.6.0", + "openai-agents>=0.4.0", "plotly>=6.2.0", "pydantic>=2.11.7", "pydantic-ai-slim[logfire]>=0.3.7", - "pytest-asyncio>=0.25.2", "scikit-learn>=1.7.0", "weaviate-client>=4.15.4", ] @@ -46,8 +45,8 @@ dev = [ "pip-audit>=2.7.3", "pre-commit>=4.1.0", "pytest>=8.3.4", - "pytest-asyncio>=0.25.2", - "pytest-cov>=6.0.0", + "pytest-asyncio>=1.2.0", + "pytest-cov>=7.0.0", "pytest-mock>=3.14.0", "ruff>=0.12.2", "transformers>=4.54.1", diff --git a/src/1_basics/0_search_demo/app.py b/src/1_basics/0_search_demo/app.py index 00543be..4c29510 100644 --- a/src/1_basics/0_search_demo/app.py +++ b/src/1_basics/0_search_demo/app.py @@ -1,20 +1,16 @@ """Knowledge Base Search Demo using Gradio.""" +import asyncio + import gradio as gr from dotenv import load_dotenv -from openai import AsyncOpenAI -from src.utils import ( - AsyncWeaviateKnowledgeBase, - Configs, - get_weaviate_async_client, - pretty_print, -) +from src.utils import AsyncClientManager, pretty_print DESCRIPTION = """\ In the example below, your goal is to find out where \ -Apple's SVP Software Engineering got his degree in engineering- \ +Apple's SVP Software Engineering got his degree in engineering - \ without knowing the full name of that person ahead of time. \ \ Did you see why traditional RAG systems like this one \ @@ -24,51 +20,46 @@ \ The output format you see is also what the Agent LLM \ would receive when interacting with the knowledge base search \ -tool in subsequent sections of this bootcamp- both when using \ +tool in subsequent sections of this bootcamp - both when using \ the Wikipedia database we provided and when using your own \ public dataset. 
""" -load_dotenv(verbose=True) - -configs = Configs() -async_weaviate_client = get_weaviate_async_client( - http_host=configs.weaviate_http_host, - http_port=configs.weaviate_http_port, - http_secure=configs.weaviate_http_secure, - grpc_host=configs.weaviate_grpc_host, - grpc_port=configs.weaviate_grpc_port, - grpc_secure=configs.weaviate_grpc_secure, - api_key=configs.weaviate_api_key, -) -async_openai_client = AsyncOpenAI() -async_knowledgebase = AsyncWeaviateKnowledgeBase( - async_weaviate_client, - collection_name=configs.weaviate_collection_name, -) - - async def search_and_pretty_format(keyword: str) -> str: """Search knowledgebase and pretty-format output.""" - output = await async_knowledgebase.search_knowledgebase(keyword) + output = await client_manager.knowledgebase.search_knowledgebase(keyword) return pretty_print(output) -json_codeblock = gr.Code(language="json", wrap_lines=True) +if __name__ == "__main__": + load_dotenv(verbose=True) + + # Initialize client manager + # This class initializes the OpenAI and Weaviate async clients, as well as the + # Weaviate knowledge base tool. The initialization is done once when the clients + # are first accessed, and the clients are reused for subsequent calls. + client_manager = AsyncClientManager() -demo = gr.Interface( - fn=search_and_pretty_format, - inputs=["text"], - outputs=[json_codeblock], - title="1.0: Knowledge Base Search Demo", - description=DESCRIPTION, - examples=[ - "Apple SVP Software Engineering", - "Craig Federighi", - "Apple SVP Software Engineering academic background", - "Craig Federighi academic background", - ], -) + # Gradio UI + # The UI consists of a text input for the search keyword + # and a code block to display the JSON-formatted search results. + demo = gr.Interface( + fn=search_and_pretty_format, + inputs=["text"], + outputs=[gr.Code(language="json", wrap_lines=True)], + title="1.0: Knowledge Base Search Demo", + description=DESCRIPTION, + examples=[ + "Apple SVP Software Engineering academic background", + "Apple SVP Software Engineering", + "Craig Federighi", + "Craig Federighi academic background", + ], + ) -demo.launch(share=True) + try: + demo.launch(share=True) + finally: + # Ensure clients are closed on exit + asyncio.run(client_manager.close()) diff --git a/src/1_basics/1_react_rag/app.py b/src/1_basics/1_react_rag/app.py index c220ad1..354ca46 100644 --- a/src/1_basics/1_react_rag/app.py +++ b/src/1_basics/1_react_rag/app.py @@ -4,26 +4,22 @@ """ import asyncio -import contextlib import json -import signal -import sys +from typing import TYPE_CHECKING, Any, AsyncGenerator import gradio as gr from dotenv import load_dotenv from gradio.components.chatbot import ChatMessage -from openai import AsyncOpenAI -from openai.types.chat import ChatCompletionSystemMessageParam, ChatCompletionToolParam from src.prompts import REACT_INSTRUCTIONS -from src.utils import ( - AsyncWeaviateKnowledgeBase, - Configs, - get_weaviate_async_client, -) +from src.utils.client_manager import AsyncClientManager -load_dotenv(verbose=True) +if TYPE_CHECKING: + from openai.types.chat import ( + ChatCompletionSystemMessageParam, + ChatCompletionToolParam, + ) MAX_TURNS = 5 @@ -55,29 +51,24 @@ } -async def _cleanup_clients() -> None: - """Close async clients.""" - await async_weaviate_client.close() - await async_openai_client.close() - - -def _handle_sigint(signum: int, frame: object) -> None: - """Handle SIGINT signal to gracefully shutdown.""" - with contextlib.suppress(Exception): - 
asyncio.get_event_loop().run_until_complete(_cleanup_clients()) - sys.exit(0) - - -async def react_rag(query: str, history: list[ChatMessage]): +async def react_rag( + query: str, history: list[ChatMessage] +) -> AsyncGenerator[list[ChatMessage], Any]: """Handle ReAct RAG chat for knowledgebase-augmented agents.""" + # Flag to track if the agent has provided a final response + # If the agent exhausts all reasoning steps without a final answer, + # we make one last call to get a final response based on the information available. + agent_responded = False + + # Construct chat completion messages to pass to LLM oai_messages = [system_message, {"role": "user", "content": query}] for _ in range(MAX_TURNS): - completion = await async_openai_client.chat.completions.create( - model=configs.default_planner_model, + # Call OpenAI chat completions with tools enabled + completion = await client_manager.openai_client.chat.completions.create( + model=client_manager.configs.default_worker_model, messages=oai_messages, - tools=tools, - reasoning_effort=None, + tools=tools, # This makes the tool defined above available to the LLM ) # Print assistant output @@ -87,20 +78,25 @@ async def react_rag(query: str, history: list[ChatMessage]): # Execute tool calls and send results back to LLM if requested. # Otherwise, stop, as the conversation would have been finished. tool_calls = message.tool_calls + + if tool_calls is None: # No tool calls, assume final response + history.append(ChatMessage(content=message.content or "", role="assistant")) + agent_responded = True + yield history + break + history.append( ChatMessage( - content=message.content or "", role="assistant", + content=message.content or "", + metadata={"title": "🧠 Thought"}, ) ) - - if tool_calls is None: - yield history - break + yield history for tool_call in tool_calls: arguments = json.loads(tool_call.function.arguments) - results = await async_knowledgebase.search_knowledgebase( + results = await client_manager.knowledgebase.search_knowledgebase( arguments["keyword"] ) results_serialized = json.dumps( @@ -117,46 +113,65 @@ async def react_rag(query: str, history: list[ChatMessage]): history.append( ChatMessage( role="assistant", - content=results_serialized, + content=f"```\n{results_serialized}\n```", metadata={ - "title": f"Used tool {tool_call.function.name}", + "title": f"🛠️ Used tool `{tool_call.function.name}`", "log": f"Arguments: {arguments}", + "status": "done", # This makes it collapsed by default }, ) ) yield history + if not agent_responded: + # Make one final LLM call to get a response given the history + oai_messages.append( + { + "role": "system", + "content": ( + "You have reached the maximum number of allowed reasoning " + "steps. Provide a final answer based on the information available." 
+ ), + } + ) + completion = await client_manager.openai_client.chat.completions.create( + model=client_manager.configs.default_planner_model, + messages=oai_messages, + ) + message = completion.choices[0].message + history.append(ChatMessage(content=message.content or "", role="assistant")) + oai_messages.pop() # Remove the last system message for next iteration + oai_messages.append(message) # Append the final message to history + yield history -demo = gr.ChatInterface( - react_rag, - title="1.1 ReAct Agent for Retrieval-Augmented Generation", - type="messages", - examples=[ - "At which university did the SVP Software Engineering" - " at Apple (as of June 2025) earn their engineering degree?", - ], -) if __name__ == "__main__": - configs = Configs() - async_weaviate_client = get_weaviate_async_client( - http_host=configs.weaviate_http_host, - http_port=configs.weaviate_http_port, - http_secure=configs.weaviate_http_secure, - grpc_host=configs.weaviate_grpc_host, - grpc_port=configs.weaviate_grpc_port, - grpc_secure=configs.weaviate_grpc_secure, - api_key=configs.weaviate_api_key, + load_dotenv(verbose=True) + + # Initialize client manager + # This class initializes the OpenAI and Weaviate async clients, as well as the + # Weaviate knowledge base tool. The initialization is done once when the clients + # are first accessed, and the clients are reused for subsequent calls. + client_manager = AsyncClientManager() + + demo = gr.ChatInterface( + react_rag, + chatbot=gr.Chatbot(height=600), + textbox=gr.Textbox(lines=1, placeholder="Enter your prompt"), + examples=[ + [ + "At which university did the SVP Software Engineering" + " at Apple (as of June 2025) earn their engineering degree?" + ], + [ + "Où le vice-président senior actuel d'Apple en charge de l'ingénierie " + "logicielle a-t-il obtenu son diplôme d'ingénieur?" 
+ ], + ], + title="1.1: ReAct Agent for Retrieval-Augmented Generation", ) - async_openai_client = AsyncOpenAI() - async_knowledgebase = AsyncWeaviateKnowledgeBase( - async_weaviate_client, - collection_name=configs.weaviate_collection_name, - ) - - signal.signal(signal.SIGINT, _handle_sigint) try: demo.launch(share=True) finally: - asyncio.run(_cleanup_clients()) + asyncio.run(client_manager.close()) diff --git a/src/1_basics/1_react_rag/cli.py b/src/1_basics/1_react_rag/cli.py index e6a432d..007e466 100644 --- a/src/1_basics/1_react_rag/cli.py +++ b/src/1_basics/1_react_rag/cli.py @@ -2,17 +2,13 @@ import asyncio import json -import sys from typing import TYPE_CHECKING from dotenv import load_dotenv -from openai import AsyncOpenAI from src.prompts import REACT_INSTRUCTIONS from src.utils import ( - AsyncWeaviateKnowledgeBase, - Configs, - get_weaviate_async_client, + AsyncClientManager, pretty_print, ) @@ -20,7 +16,6 @@ if TYPE_CHECKING: from openai.types.chat import ChatCompletionToolParam -load_dotenv(verbose=True) MAX_TURNS = 5 @@ -47,22 +42,12 @@ ] -async def _main(): - configs = Configs() - async_weaviate_client = get_weaviate_async_client( - http_host=configs.weaviate_http_host, - http_port=configs.weaviate_http_port, - http_secure=configs.weaviate_http_secure, - grpc_host=configs.weaviate_grpc_host, - grpc_port=configs.weaviate_grpc_port, - grpc_secure=configs.weaviate_grpc_secure, - api_key=configs.weaviate_api_key, - ) - async_openai_client = AsyncOpenAI() - async_knowledgebase = AsyncWeaviateKnowledgeBase( - async_weaviate_client, - collection_name=configs.weaviate_collection_name, - ) +async def _main() -> None: + # Initialize client manager + # This class initializes the OpenAI and Weaviate async clients, as well as the + # Weaviate knowledge base tool. The initialization is done once when the clients + # are first accessed, and the clients are reused for subsequent calls. + client_manager = AsyncClientManager() messages: list = [ { @@ -76,11 +61,19 @@ async def _main(): }, ] + # Show initial system prompt and user query + print("System prompt: \n", REACT_INSTRUCTIONS) + print("User query: ") + pretty_print(messages[-1]["content"]) + try: while True: + # Flag to track if final response is given + agent_responded = False + for _ in range(MAX_TURNS): - completion = await async_openai_client.chat.completions.create( - model=configs.default_planner_model, + completion = await client_manager.openai_client.chat.completions.create( + model=client_manager.configs.default_worker_model, messages=messages, tools=tools, ) @@ -93,11 +86,18 @@ async def _main(): # Execute function calls if requested. if tool_calls is not None: + # Show thought that led to tool call + print("\nAgent Thought: ") + pretty_print(message.content) + for tool_call in tool_calls: + print("\nAgent Action: ") pretty_print(tool_call) arguments = json.loads(tool_call.function.arguments) - results = await async_knowledgebase.search_knowledgebase( - arguments["keyword"] + results = ( + await client_manager.knowledgebase.search_knowledgebase( + arguments["keyword"] + ) ) messages.append( @@ -109,19 +109,49 @@ async def _main(): ), } ) + print("\nAgent Observation: ") + pretty_print(results) # Otherwise, print final response and stop. 
- else: - pretty_print(message.content) + elif message.content is not None: + print("\nAgent final response: \n", message.content) + agent_responded = True break - pretty_print(messages) + if not agent_responded: + # Add message letting the agent know max turns reached + messages.append( + { + "role": "system", + "content": ( + "You have reached the maximum number of allowed reasoning " + "steps. Provide a final answer based on the information available." + ), + } + ) + + # Make one final LLM call to get a response given the history + completion = await client_manager.openai_client.chat.completions.create( + model=client_manager.configs.default_worker_model, + messages=messages, + ) + message = completion.choices[0].message + print( + "\nAgent final response (after max turns): \n", + message.content, + ) + + # Remove the last system message for next iteration + messages.pop() + + # Append the final message to history + messages.append(message) # Get new user input - timeout_secs = 60 + timeout_secs = 300 try: user_input = await asyncio.wait_for( - asyncio.to_thread(input, "Ask a question: "), + asyncio.to_thread(input, "Enter your prompt: "), timeout=timeout_secs, ) except asyncio.TimeoutError: @@ -135,10 +165,10 @@ async def _main(): messages.append({"role": "user", "content": user_input}) finally: - await async_weaviate_client.close() - await async_openai_client.close() - sys.exit(0) + await client_manager.close() if __name__ == "__main__": + load_dotenv(verbose=True) + asyncio.run(_main()) diff --git a/src/2_frameworks/1_react_rag/app.py b/src/2_frameworks/1_react_rag/app.py index 40f1360..744a99d 100644 --- a/src/2_frameworks/1_react_rag/app.py +++ b/src/2_frameworks/1_react_rag/app.py @@ -1,96 +1,86 @@ """Reason-and-Act Knowledge Retrieval Agent via the OpenAI Agent SDK.""" import asyncio -import contextlib import logging -import signal -import sys +from typing import Any, AsyncGenerator import agents import gradio as gr from dotenv import load_dotenv from gradio.components.chatbot import ChatMessage -from openai import AsyncOpenAI from src.prompts import REACT_INSTRUCTIONS -from src.utils import ( - AsyncWeaviateKnowledgeBase, - Configs, - get_weaviate_async_client, - oai_agent_stream_to_gradio_messages, -) +from src.utils import oai_agent_stream_to_gradio_messages +from src.utils.agent_session import get_or_create_session +from src.utils.client_manager import AsyncClientManager +from src.utils.gradio import COMMON_GRADIO_CONFIG -load_dotenv(verbose=True) +async def _main( + query: str, history: list[ChatMessage], session_state: dict[str, Any] +) -> AsyncGenerator[list[ChatMessage], Any]: + # Initialize list of chat messages for a single turn + turn_messages: list[ChatMessage] = [] + # Construct an in-memory SQLite session for the agent to maintain + # conversation history across multiple turns of a chat + # This makes it possible to ask follow-up questions that refer to + # previous turns in the conversation + session = get_or_create_session(history, session_state) -logging.basicConfig(level=logging.INFO) - - -async def _cleanup_clients() -> None: - """Close async clients.""" - await async_weaviate_client.close() - await async_openai_client.close() - - -def _handle_sigint(signum: int, frame: object) -> None: - """Handle SIGINT signal to gracefully shutdown.""" - with contextlib.suppress(Exception): - asyncio.get_event_loop().run_until_complete(_cleanup_clients()) - sys.exit(0) - - -async def _main(question: str, gr_messages: list[ChatMessage]): + # Define an agent using the OpenAI 
Agent SDK main_agent = agents.Agent( - name="Wikipedia Agent", - instructions=REACT_INSTRUCTIONS, - tools=[agents.function_tool(async_knowledgebase.search_knowledgebase)], + name="Wikipedia Agent", # Agent name for logging and debugging purposes + instructions=REACT_INSTRUCTIONS, # System instructions for the agent + # Tools available to the agent + # We wrap the `search_knowledgebase` method with `function_tool`, which + # will construct the tool definition JSON schema by extracting the necessary + # information from the method signature and docstring. + tools=[agents.function_tool(client_manager.knowledgebase.search_knowledgebase)], model=agents.OpenAIChatCompletionsModel( - model=configs.default_planner_model, openai_client=async_openai_client + model=client_manager.configs.default_worker_model, + openai_client=client_manager.openai_client, ), - model_settings=agents.ModelSettings(parallel_tool_calls=True), ) - result_stream = agents.Runner.run_streamed(main_agent, input=question) - async for _item in result_stream.stream_events(): - gr_messages += oai_agent_stream_to_gradio_messages(_item) - if len(gr_messages) > 0: - yield gr_messages + # Run the agent in streaming mode to get and display intermediate outputs + result_stream = agents.Runner.run_streamed(main_agent, input=query, session=session) - -demo = gr.ChatInterface( - _main, - title="2.1 OAI Agent SDK ReAct", - type="messages", - examples=[ - "At which university did the SVP Software Engineering" - " at Apple (as of June 2025) earn their engineering degree?", - ], -) + async for _item in result_stream.stream_events(): + # Parse the stream events, convert to Gradio chat messages and append to + # the chat history + turn_messages += oai_agent_stream_to_gradio_messages(_item) + if len(turn_messages) > 0: + yield turn_messages if __name__ == "__main__": - configs = Configs() - async_weaviate_client = get_weaviate_async_client( - http_host=configs.weaviate_http_host, - http_port=configs.weaviate_http_port, - http_secure=configs.weaviate_http_secure, - grpc_host=configs.weaviate_grpc_host, - grpc_port=configs.weaviate_grpc_port, - grpc_secure=configs.weaviate_grpc_secure, - api_key=configs.weaviate_api_key, - ) - async_knowledgebase = AsyncWeaviateKnowledgeBase( - async_weaviate_client, - collection_name=configs.weaviate_collection_name, - ) + load_dotenv(verbose=True) + logging.basicConfig(level=logging.INFO) + + # Initialize client manager + # This class initializes the OpenAI and Weaviate async clients, as well as the + # Weaviate knowledge base tool. The initialization is done once when the clients + # are first accessed, and the clients are reused for subsequent calls. + client_manager = AsyncClientManager() - async_openai_client = AsyncOpenAI() + # Disable tracing to OpenAI platform since we are using Gemini models instead + # of OpenAI models agents.set_tracing_disabled(disabled=True) - signal.signal(signal.SIGINT, _handle_sigint) + demo = gr.ChatInterface( + _main, + **COMMON_GRADIO_CONFIG, + examples=[ + [ + "At which university did the SVP Software Engineering" + " at Apple (as of June 2025) earn their engineering degree?" 
+ ], + ], + title="2.1: ReAct for Retrieval-Augmented Generation with OpenAI Agent SDK", + ) try: demo.launch(share=True) finally: - asyncio.run(_cleanup_clients()) + asyncio.run(client_manager.close()) diff --git a/src/2_frameworks/1_react_rag/cli.py b/src/2_frameworks/1_react_rag/cli.py index f099016..f755b7d 100644 --- a/src/2_frameworks/1_react_rag/cli.py +++ b/src/2_frameworks/1_react_rag/cli.py @@ -11,46 +11,20 @@ function_tool, ) from dotenv import load_dotenv -from openai import AsyncOpenAI from src.prompts import REACT_INSTRUCTIONS -from src.utils import ( - AsyncWeaviateKnowledgeBase, - Configs, - get_weaviate_async_client, - pretty_print, -) - - -load_dotenv(verbose=True) - -no_tracing_config = RunConfig(tracing_disabled=True) - - -async def _main(query: str): - configs = Configs() - async_weaviate_client = get_weaviate_async_client( - http_host=configs.weaviate_http_host, - http_port=configs.weaviate_http_port, - http_secure=configs.weaviate_http_secure, - grpc_host=configs.weaviate_grpc_host, - grpc_port=configs.weaviate_grpc_port, - grpc_secure=configs.weaviate_grpc_secure, - api_key=configs.weaviate_api_key, - ) - async_knowledgebase = AsyncWeaviateKnowledgeBase( - async_weaviate_client, - collection_name=configs.weaviate_collection_name, - ) +from src.utils import pretty_print +from src.utils.client_manager import AsyncClientManager - async_openai_client = AsyncOpenAI() +async def _main(query: str) -> None: wikipedia_agent = Agent( name="Wikipedia Agent", instructions=REACT_INSTRUCTIONS, - tools=[function_tool(async_knowledgebase.search_knowledgebase)], + tools=[function_tool(client_manager.knowledgebase.search_knowledgebase)], model=OpenAIChatCompletionsModel( - model=configs.default_planner_model, openai_client=async_openai_client + model=client_manager.configs.default_worker_model, + openai_client=client_manager.openai_client, ), ) @@ -77,15 +51,23 @@ async def _main(query: str): # if len(event_parsed) > 0: # pretty_print(event_parsed) - await async_weaviate_client.close() - await async_openai_client.close() - if __name__ == "__main__": + load_dotenv(verbose=True) + + logging.basicConfig(level=logging.INFO) + + no_tracing_config = RunConfig(tracing_disabled=True) + + # Initialize client manager + # This class initializes the OpenAI and Weaviate async clients, as well as the + # Weaviate knowledge base tool. The initialization is done once when the clients + # are first accessed, and the clients are reused for subsequent calls. + client_manager = AsyncClientManager() + query = ( "At which university did the SVP Software Engineering" " at Apple (as of June 2025) earn their engineering degree?" 
) - logging.basicConfig(level=logging.INFO) asyncio.run(_main(query)) diff --git a/src/2_frameworks/1_react_rag/langfuse_gradio.py b/src/2_frameworks/1_react_rag/langfuse_gradio.py index 4d3b16e..156bcab 100644 --- a/src/2_frameworks/1_react_rag/langfuse_gradio.py +++ b/src/2_frameworks/1_react_rag/langfuse_gradio.py @@ -4,106 +4,100 @@ """ import asyncio -import contextlib -import signal -import sys +from typing import Any, AsyncGenerator import agents import gradio as gr from dotenv import load_dotenv from gradio.components.chatbot import ChatMessage -from openai import AsyncOpenAI from src.prompts import REACT_INSTRUCTIONS from src.utils import ( - AsyncWeaviateKnowledgeBase, - Configs, - get_weaviate_async_client, oai_agent_stream_to_gradio_messages, pretty_print, set_up_logging, setup_langfuse_tracer, ) +from src.utils.agent_session import get_or_create_session +from src.utils.client_manager import AsyncClientManager +from src.utils.gradio import COMMON_GRADIO_CONFIG from src.utils.langfuse.shared_client import langfuse_client -load_dotenv(verbose=True) +async def _main( + query: str, history: list[ChatMessage], session_state: dict[str, Any] +) -> AsyncGenerator[list[ChatMessage], Any]: + # Initialize list of chat messages for a single turn + turn_messages: list[ChatMessage] = [] -set_up_logging() - - -configs = Configs() -async_weaviate_client = get_weaviate_async_client( - http_host=configs.weaviate_http_host, - http_port=configs.weaviate_http_port, - http_secure=configs.weaviate_http_secure, - grpc_host=configs.weaviate_grpc_host, - grpc_port=configs.weaviate_grpc_port, - grpc_secure=configs.weaviate_grpc_secure, - api_key=configs.weaviate_api_key, -) -async_openai_client = AsyncOpenAI() -async_knowledgebase = AsyncWeaviateKnowledgeBase( - async_weaviate_client, - collection_name=configs.weaviate_collection_name, -) - - -async def _cleanup_clients() -> None: - """Close async clients.""" - await async_weaviate_client.close() - await async_openai_client.close() - - -def _handle_sigint(signum: int, frame: object) -> None: - """Handle SIGINT signal to gracefully shutdown.""" - with contextlib.suppress(Exception): - asyncio.get_event_loop().run_until_complete(_cleanup_clients()) - sys.exit(0) - - -async def _main(question: str, gr_messages: list[ChatMessage]): - setup_langfuse_tracer() + # Construct an in-memory SQLite session for the agent to maintain + # conversation history across multiple turns of a chat + # This makes it possible to ask follow-up questions that refer to + # previous turns in the conversation + session = get_or_create_session(history, session_state) + # Define an agent using the OpenAI Agent SDK main_agent = agents.Agent( - name="Wikipedia Agent", - instructions=REACT_INSTRUCTIONS, - tools=[agents.function_tool(async_knowledgebase.search_knowledgebase)], + name="Wikipedia Agent", # Agent name for logging and debugging purposes + instructions=REACT_INSTRUCTIONS, # System instructions for the agent + # Tools available to the agent + # We wrap the `search_knowledgebase` method with `function_tool`, which + # will construct the tool definition JSON schema by extracting the necessary + # information from the method signature and docstring. 
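The comment above describes how `agents.function_tool` derives the tool definition from the wrapped callable. A minimal standalone sketch of that behavior, using a simplified stand-in for `search_knowledgebase` (the real method and its docstring are not shown in this diff, so the body and docstring below are illustrative assumptions):

```python
from agents import function_tool


async def search_knowledgebase(keyword: str) -> list[dict]:
    """Search the knowledge base for chunks matching a keyword.

    Args:
        keyword: The search keyword or phrase.
    """
    # Illustrative stub only; the real method runs a Weaviate query.
    return [{"title": "Example article", "snippet": f"... {keyword} ..."}]


tool = function_tool(search_knowledgebase)
print(tool.name)  # "search_knowledgebase" (taken from the function name)
print(tool.description)  # taken from the docstring summary line
print(tool.params_json_schema)  # JSON schema requiring a single string "keyword"
```

This is why the typed signature and accurate docstring matter: together they become the tool contract the LLM sees.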
+ tools=[agents.function_tool(client_manager.knowledgebase.search_knowledgebase)], model=agents.OpenAIChatCompletionsModel( - model=configs.default_planner_model, openai_client=async_openai_client + model=client_manager.configs.default_worker_model, + openai_client=client_manager.openai_client, ), ) with langfuse_client.start_as_current_span(name="Agents-SDK-Trace") as span: - span.update(input=question) + span.update(input=query) + + # Run the agent in streaming mode to get and display intermediate outputs + result_stream = agents.Runner.run_streamed( + main_agent, input=query, session=session + ) - result_stream = agents.Runner.run_streamed(main_agent, input=question) async for _item in result_stream.stream_events(): - gr_messages += oai_agent_stream_to_gradio_messages(_item) - if len(gr_messages) > 0: - yield gr_messages + turn_messages += oai_agent_stream_to_gradio_messages(_item) + if len(turn_messages) > 0: + yield turn_messages span.update(output=result_stream.final_output) - pretty_print(gr_messages) - yield gr_messages + pretty_print(turn_messages) + yield turn_messages -demo = gr.ChatInterface( - _main, - title="2.1 OAI Agent SDK ReAct + LangFuse", - type="messages", - examples=[ - "At which university did the SVP Software Engineering" - " at Apple (as of June 2025) earn their engineering degree?", - ], -) +if __name__ == "__main__": + load_dotenv(verbose=True) + # Set logging level and suppress some noisy logs from dependencies + set_up_logging() -if __name__ == "__main__": - signal.signal(signal.SIGINT, _handle_sigint) + # Set up LangFuse for tracing + setup_langfuse_tracer() + + # Initialize client manager + # This class initializes the OpenAI and Weaviate async clients, as well as the + # Weaviate knowledge base tool. The initialization is done once when the clients + # are first accessed, and the clients are reused for subsequent calls. 
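`AsyncClientManager` is referenced throughout this PR but its definition is not part of the diff. Judging from the call sites (`.configs`, `.openai_client`, `.knowledgebase`, `.close()`) and the module-level setup it replaces, it might look roughly like the sketch below; the lazy-caching details are assumptions, not the actual implementation:

```python
from typing import Any

from openai import AsyncOpenAI

from src.utils import AsyncWeaviateKnowledgeBase, Configs, get_weaviate_async_client


class AsyncClientManager:
    """Sketch: lazily construct and cache the async clients shared by the demos."""

    def __init__(self) -> None:
        self.configs = Configs()
        self._openai_client: AsyncOpenAI | None = None
        self._weaviate_client: Any = None
        self._knowledgebase: AsyncWeaviateKnowledgeBase | None = None

    @property
    def openai_client(self) -> AsyncOpenAI:
        # Created on first access, reused afterwards.
        if self._openai_client is None:
            self._openai_client = AsyncOpenAI()
        return self._openai_client

    @property
    def knowledgebase(self) -> AsyncWeaviateKnowledgeBase:
        if self._knowledgebase is None:
            # Connection settings mirror the module-level setup removed in this PR.
            self._weaviate_client = get_weaviate_async_client(
                http_host=self.configs.weaviate_http_host,
                http_port=self.configs.weaviate_http_port,
                http_secure=self.configs.weaviate_http_secure,
                grpc_host=self.configs.weaviate_grpc_host,
                grpc_port=self.configs.weaviate_grpc_port,
                grpc_secure=self.configs.weaviate_grpc_secure,
                api_key=self.configs.weaviate_api_key,
            )
            self._knowledgebase = AsyncWeaviateKnowledgeBase(
                self._weaviate_client,
                collection_name=self.configs.weaviate_collection_name,
            )
        return self._knowledgebase

    async def close(self) -> None:
        """Close whichever clients were actually created."""
        if self._weaviate_client is not None:
            await self._weaviate_client.close()
        if self._openai_client is not None:
            await self._openai_client.close()
```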
+ client_manager = AsyncClientManager() + + demo = gr.ChatInterface( + _main, + **COMMON_GRADIO_CONFIG, + examples=[ + [ + "At which university did the SVP Software Engineering" + " at Apple (as of June 2025) earn their engineering degree?", + ] + ], + title="2.1: ReAct for Retrieval-Augmented Generation with OpenAI Agent SDK + LangFuse", + ) try: demo.launch(share=True) finally: - asyncio.run(_cleanup_clients()) + asyncio.run(client_manager.close()) diff --git a/src/2_frameworks/2_multi_agent/efficient.py b/src/2_frameworks/2_multi_agent/efficient.py index 0887921..c6880a9 100644 --- a/src/2_frameworks/2_multi_agent/efficient.py +++ b/src/2_frameworks/2_multi_agent/efficient.py @@ -7,138 +7,129 @@ """ import asyncio -import contextlib -import signal -import sys +from typing import Any, AsyncGenerator import agents import gradio as gr from dotenv import load_dotenv from gradio.components.chatbot import ChatMessage -from openai import AsyncOpenAI from src.prompts import REACT_INSTRUCTIONS from src.utils import ( - AsyncWeaviateKnowledgeBase, - Configs, - get_weaviate_async_client, oai_agent_stream_to_gradio_messages, set_up_logging, setup_langfuse_tracer, ) +from src.utils.agent_session import get_or_create_session +from src.utils.client_manager import AsyncClientManager +from src.utils.gradio import COMMON_GRADIO_CONFIG from src.utils.langfuse.shared_client import langfuse_client -load_dotenv(verbose=True) +async def _main( + query: str, history: list[ChatMessage], session_state: dict[str, Any] +) -> AsyncGenerator[list[ChatMessage], Any]: + # Initialize list of chat messages for a single turn + turn_messages: list[ChatMessage] = [] -set_up_logging() - - -configs = Configs() -async_weaviate_client = get_weaviate_async_client( - http_host=configs.weaviate_http_host, - http_port=configs.weaviate_http_port, - http_secure=configs.weaviate_http_secure, - grpc_host=configs.weaviate_grpc_host, - grpc_port=configs.weaviate_grpc_port, - grpc_secure=configs.weaviate_grpc_secure, - api_key=configs.weaviate_api_key, -) -async_openai_client = AsyncOpenAI() -async_knowledgebase = AsyncWeaviateKnowledgeBase( - async_weaviate_client, - collection_name=configs.weaviate_collection_name, -) - -AGENT_LLM_NAMES = { - "worker": configs.default_worker_model, # less expensive, - "planner": configs.default_planner_model, # more expensive, better at reasoning and planning -} - - -async def _cleanup_clients() -> None: - """Close async clients.""" - await async_weaviate_client.close() - await async_openai_client.close() - - -def _handle_sigint(signum: int, frame: object) -> None: - """Handle SIGINT signal to gracefully shutdown.""" - with contextlib.suppress(Exception): - asyncio.get_event_loop().run_until_complete(_cleanup_clients()) - sys.exit(0) - - -# Worker Agent: handles long context efficiently -search_agent = agents.Agent( - name="SearchAgent", - instructions=( - "You are a search agent. You receive a single search query as input. " - "Use the search tool to perform a search, then produce a concise " - "'search summary' of the key findings. Do NOT return raw search results." 
- ), - tools=[ - agents.function_tool(async_knowledgebase.search_knowledgebase), - ], - # a faster, smaller model for quick searches - model=agents.OpenAIChatCompletionsModel( - model=AGENT_LLM_NAMES["worker"], openai_client=async_openai_client - ), -) - -# Main Agent: more expensive and slower, but better at complex planning -main_agent = agents.Agent( - name="MainAgent", - instructions=REACT_INSTRUCTIONS, - # Allow the planner agent to invoke the worker agent. - # The long context provided to the worker agent is hidden from the main agent. - tools=[ - search_agent.as_tool( - tool_name="search", - tool_description="Perform a web search for a query and return a concise summary.", - ) - ], - # a larger, more capable model for planning and reasoning over summaries - model=agents.OpenAIChatCompletionsModel( - model=AGENT_LLM_NAMES["planner"], openai_client=async_openai_client - ), -) - - -async def _main(question: str, gr_messages: list[ChatMessage]): - setup_langfuse_tracer() + # Construct an in-memory SQLite session for the agent to maintain + # conversation history across multiple turns of a chat + # This makes it possible to ask follow-up questions that refer to + # previous turns in the conversation + session = get_or_create_session(history, session_state) # Use the main agent as the entry point- not the worker agent. with langfuse_client.start_as_current_span(name="Agents-SDK-Trace") as span: - span.update(input=question) + span.update(input=query) + + # Run the agent in streaming mode to get and display intermediate outputs + result_stream = agents.Runner.run_streamed( + main_agent, input=query, session=session + ) - result_stream = agents.Runner.run_streamed(main_agent, input=question) async for _item in result_stream.stream_events(): - gr_messages += oai_agent_stream_to_gradio_messages(_item) - if len(gr_messages) > 0: - yield gr_messages + turn_messages += oai_agent_stream_to_gradio_messages(_item) + if len(turn_messages) > 0: + yield turn_messages span.update(output=result_stream.final_output) -demo = gr.ChatInterface( - _main, - title="2.2 Multi-Agent for Efficiency", - type="messages", - examples=[ - "At which university did the SVP Software Engineering" - " at Apple (as of June 2025) earn their engineering degree?", - "How does the annual growth in the 50th-percentile income " - "in the US compare with that in Canada?", - ], -) - if __name__ == "__main__": - async_openai_client = AsyncOpenAI() + load_dotenv(verbose=True) + + # Set logging level and suppress some noisy logs from dependencies + set_up_logging() + + # Set up LangFuse for tracing + setup_langfuse_tracer() - signal.signal(signal.SIGINT, _handle_sigint) + # Initialize client manager + # This class initializes the OpenAI and Weaviate async clients, as well as the + # Weaviate knowledge base tool. The initialization is done once when the clients + # are first accessed, and the clients are reused for subsequent calls. + client_manager = AsyncClientManager() + + # Use smaller, faster model for focused search tasks + worker_model = client_manager.configs.default_worker_model + # Use larger, more capable model for complex planning and reasoning + planner_model = client_manager.configs.default_planner_model + + # Worker Agent: handles long context efficiently + search_agent = agents.Agent( + name="SearchAgent", + instructions=( + "You are a search agent. You receive a single search query as input. " + "Use the search tool to perform a search, then produce a concise " + "'search summary' of the key findings. 
Do NOT return raw search results."
+        ),
+        tools=[
+            agents.function_tool(client_manager.knowledgebase.search_knowledgebase),
+        ],
+        # a faster, smaller model for quick searches
+        model=agents.OpenAIChatCompletionsModel(
+            model=worker_model, openai_client=client_manager.openai_client
+        ),
+    )
+
+    # Main Agent: more expensive and slower, but better at complex planning
+    main_agent = agents.Agent(
+        name="MainAgent",
+        instructions=REACT_INSTRUCTIONS,
+        # Allow the planner agent to invoke the worker agent.
+        # The long context provided to the worker agent is hidden from the main agent.
+        tools=[
+            search_agent.as_tool(
+                tool_name="search_knowledgebase",
+                tool_description="Perform a search on a Wikipedia knowledge base for a query and return a concise summary.",
+            )
+        ],
+        # a larger, more capable model for planning and reasoning over summaries
+        model=agents.OpenAIChatCompletionsModel(
+            model=planner_model, openai_client=client_manager.openai_client
+        ),
+        # NOTE: enabling parallel tool calls here can sometimes lead to issues
+        # with invalid arguments being passed to the search agent.
+        model_settings=agents.ModelSettings(parallel_tool_calls=False),
+    )
+
+    demo = gr.ChatInterface(
+        _main,
+        **COMMON_GRADIO_CONFIG,
+        examples=[
+            [
+                "At which university did the SVP Software Engineering"
+                " at Apple (as of June 2025) earn their engineering degree?"
+            ],
+            [
+                "How does the annual growth in the 50th-percentile income "
+                "in the US compare with that in Canada?",
+            ],
+        ],
+        title="2.2.2: Multi-Agent Orchestrator-worker for Retrieval-Augmented Generation",
+    )
 
     try:
         demo.launch(share=True)
     finally:
-        asyncio.run(_cleanup_clients())
+        asyncio.run(client_manager.close())
diff --git a/src/2_frameworks/2_multi_agent/efficient_multiple_kbs.py b/src/2_frameworks/2_multi_agent/efficient_multiple_kbs.py
index 3847a17..5331685 100644
--- a/src/2_frameworks/2_multi_agent/efficient_multiple_kbs.py
+++ b/src/2_frameworks/2_multi_agent/efficient_multiple_kbs.py
@@ -1,24 +1,21 @@
 """Example code for planner-worker agent collaboration with multiple tools."""
 
 import asyncio
-import contextlib
-import signal
-import sys
+from typing import Any, AsyncGenerator
 
 import agents
 import gradio as gr
 from dotenv import load_dotenv
 from gradio.components.chatbot import ChatMessage
-from openai import AsyncOpenAI
 
 from src.utils import (
-    AsyncWeaviateKnowledgeBase,
-    Configs,
-    get_weaviate_async_client,
     oai_agent_stream_to_gradio_messages,
     set_up_logging,
     setup_langfuse_tracer,
 )
+from src.utils.agent_session import get_or_create_session
+from src.utils.client_manager import AsyncClientManager
+from src.utils.gradio import COMMON_GRADIO_CONFIG
 from src.utils.langfuse.shared_client import langfuse_client
 from src.utils.tools.gemini_grounding import (
     GeminiGroundingWithGoogleSearch,
@@ -26,178 +23,182 @@
 )
 
 
-load_dotenv(verbose=True)
+async def _main(
+    query: str, history: list[ChatMessage], session_state: dict[str, Any]
+) -> AsyncGenerator[list[ChatMessage], Any]:
+    # Initialize list of chat messages for a single turn
+    turn_messages: list[ChatMessage] = []
 
-set_up_logging()
-
-
-configs = Configs()
-async_weaviate_client = get_weaviate_async_client(
-    http_host=configs.weaviate_http_host,
-    http_port=configs.weaviate_http_port,
-    http_secure=configs.weaviate_http_secure,
-    grpc_host=configs.weaviate_grpc_host,
-    grpc_port=configs.weaviate_grpc_port,
-    grpc_secure=configs.weaviate_grpc_secure,
-    api_key=configs.weaviate_api_key,
-)
-async_openai_client = AsyncOpenAI()
-async_knowledgebase = 
AsyncWeaviateKnowledgeBase( - async_weaviate_client, - collection_name=configs.weaviate_collection_name, -) - -AGENT_LLM_NAMES = { - "worker": configs.default_worker_model, # less expensive, - "planner": configs.default_planner_model, # more expensive, better at reasoning and planning -} - -gemini_grounding_tool = GeminiGroundingWithGoogleSearch( - model_settings=ModelSettings(model=AGENT_LLM_NAMES["worker"]) -) - - -async def _cleanup_clients() -> None: - """Close async clients.""" - await async_weaviate_client.close() - await async_openai_client.close() - - -def _handle_sigint(signum: int, frame: object) -> None: - """Handle SIGINT signal to gracefully shutdown.""" - with contextlib.suppress(Exception): - asyncio.get_event_loop().run_until_complete(_cleanup_clients()) - sys.exit(0) - - -# Worker Agent: handles long context efficiently -kb_agent = agents.Agent( - name="KnowledgeBaseAgent", - instructions=""" - You are an agent specialized in searching a knowledge base. - You will receive a single search query as input. - Use the 'search_knowledgebase' tool to perform a search, then return a - JSON object with: - - 'summary': a concise synthesis of the retrieved information in your own words - - 'sources': a list of citations with {type: "kb", title: "...", section: "..."} - - 'no_results': true/false - - If the tool returns no matches, set "no_results": true and keep "sources" empty. - Do NOT make up information. Do NOT return raw search results or long quotes. - """, - tools=[ - agents.function_tool(async_knowledgebase.search_knowledgebase), - ], - # a faster, smaller model for quick searches - model=agents.OpenAIChatCompletionsModel( - model=AGENT_LLM_NAMES["worker"], openai_client=async_openai_client - ), -) - -# Main Agent: more expensive and slower, but better at complex planning -main_agent = agents.Agent( - name="MainAgent", - instructions=""" - You are a deep research agent and your goal is to conduct in-depth, multi-turn - research by breaking down complex queries, using the provided tools, and - synthesizing the information into a comprehensive report. - - You have access to the following tools: - 1. 'search_knowledgebase' - use this tool to search for information in a - knowledge base. The knowledge base reflects a subset of Wikipedia as - of May 2025. - 2. 'get_web_search_grounded_response' - use this tool for current events, - news, fact-checking or when the information in the knowledge base is - not sufficient to answer the question. - - Both tools will not return raw search results or the sources themselves. - Instead, they will return a concise summary of the key findings, along - with the sources used to generate the summary. - - For best performance, divide complex queries into simpler sub-queries - Before calling either tool, always explain your reasoning for doing so. - - Note that the 'get_web_search_grounded_response' tool will expand the query - into multiple search queries and execute them. It will also return the - queries it executed. Do not repeat them. - - **Routing Guidelines:** - - When answering a question, you should first try to use the 'search_knowledgebase' - tool, unless the question requires recent information after May 2025 or - has explicit recency cues. - - If either tool returns insufficient information for a given query, try - reformulating or using the other tool. You can call either tool multiple - times to get the information you need to answer the user's question. 
-
-    **Guidelines for synthesis**
-    - After collecting results, write the final answer from your own synthesis.
-    - Add a "Sources" section listing unique sources, formatted as:
-      [1] Publisher - URL
-      [2] Wikipedia: <article title> (Section: <section title>
) - Order by first mention in your text. Every factual sentence in your final - response must map to at least one source. - - If web and knowledge base disagree, surface the disagreement and prefer sources - with newer publication dates. - - Do not invent URLs or sources. - - If both tools fail, say so and suggest 2–3 refined queries. - - Be sure to mention the sources in your response, including the URL if available, - and do not make up information. - """, - # Allow the planner agent to invoke the worker agent. - # The long context provided to the worker agent is hidden from the main agent. - tools=[ - kb_agent.as_tool( - tool_name="search_knowledgebase", - tool_description=( - "Search the knowledge base for a query and return a concise summary " - "of the key findings, along with the sources used to generate " - "the summary" - ), - ), - agents.function_tool(gemini_grounding_tool.get_web_search_grounded_response), - ], - # a larger, more capable model for planning and reasoning over summaries - model=agents.OpenAIChatCompletionsModel( - model=AGENT_LLM_NAMES["planner"], openai_client=async_openai_client - ), -) - - -async def _main(question: str, gr_messages: list[ChatMessage]): - setup_langfuse_tracer() + # Construct an in-memory SQLite session for the agent to maintain + # conversation history across multiple turns of a chat + # This makes it possible to ask follow-up questions that refer to + # previous turns in the conversation + session = get_or_create_session(history, session_state) # Use the main agent as the entry point- not the worker agent. with langfuse_client.start_as_current_span(name="Agents-SDK-Trace") as span: - span.update(input=question) + span.update(input=query) + + # Run the agent in streaming mode to get and display intermediate outputs + result_stream = agents.Runner.run_streamed( + main_agent, + input=query, + session=session, + max_turns=30, # Increase max turns to support more complex queries + ) - result_stream = agents.Runner.run_streamed(main_agent, input=question) async for _item in result_stream.stream_events(): - gr_messages += oai_agent_stream_to_gradio_messages(_item) - if len(gr_messages) > 0: - yield gr_messages + turn_messages += oai_agent_stream_to_gradio_messages(_item) + if len(turn_messages) > 0: + yield turn_messages span.update(output=result_stream.final_output) -demo = gr.ChatInterface( - _main, - title="2.3 Multi-Agent with Multiple Search Tools", - type="messages", - examples=[ - "At which university did the SVP Software Engineering" - " at Apple (as of June 2025) earn their engineering degree?", - "How does the annual growth in the 50th-percentile income " - "in the US compare with that in Canada?", - ], -) - if __name__ == "__main__": - async_openai_client = AsyncOpenAI() + load_dotenv(verbose=True) + + # Set logging level and suppress some noisy logs from dependencies + set_up_logging() - signal.signal(signal.SIGINT, _handle_sigint) + # Set up LangFuse for tracing + setup_langfuse_tracer() + + # Initialize client manager + # This class initializes the OpenAI and Weaviate async clients, as well as the + # Weaviate knowledge base tool. The initialization is done once when the clients + # are first accessed, and the clients are reused for subsequent calls. 
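Likewise, `get_or_create_session` (used by the `_main` functions in this and the previous scripts) is never shown in the diff. Given the "in-memory SQLite session" comments accompanying each call site, a plausible sketch; the keying and reset-on-new-chat logic are assumptions:

```python
from typing import Any

from agents import SQLiteSession
from gradio.components.chatbot import ChatMessage


def get_or_create_session(
    history: list[ChatMessage], session_state: dict[str, Any]
) -> SQLiteSession:
    """Sketch: one in-memory agent session per Gradio browser session.

    An empty `history` means Gradio started a fresh chat, so a new session
    is created to avoid leaking state from the previous conversation.
    """
    if not history or "agent_session" not in session_state:
        # SQLiteSession defaults to an in-memory database, matching the
        # "in-memory SQLite session" comments in the scripts above.
        session_state["agent_session"] = SQLiteSession("gradio-chat")
    return session_state["agent_session"]
```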
+    client_manager = AsyncClientManager()
+
+    # Use smaller, faster model for focused search tasks
+    worker_model = client_manager.configs.default_worker_model
+    # Use larger, more capable model for complex planning and reasoning
+    planner_model = client_manager.configs.default_planner_model
+
+    gemini_grounding_tool = GeminiGroundingWithGoogleSearch(
+        model_settings=ModelSettings(model=worker_model)
+    )
+
+    # Worker Agent: handles long context efficiently
+    kb_agent = agents.Agent(
+        name="KnowledgeBaseAgent",
+        instructions="""
+        You are an agent specialized in searching a knowledge base.
+        You will receive a single search query as input.
+        Use the 'search_knowledgebase' tool to perform a search, then return a
+        JSON object with:
+        - 'summary': a concise synthesis of the retrieved information in your own words
+        - 'sources': a list of citations with {type: "kb", title: "...", section: "..."}
+        - 'no_results': true/false
+
+        If the tool returns no matches, set "no_results": true and keep "sources" empty.
+        Do NOT make up information. Do NOT return raw search results or long quotes.
+        """,
+        tools=[
+            agents.function_tool(client_manager.knowledgebase.search_knowledgebase),
+        ],
+        # a faster, smaller model for quick searches
+        model=agents.OpenAIChatCompletionsModel(
+            model=worker_model, openai_client=client_manager.openai_client
+        ),
+    )
+
+    # Main Agent: more expensive and slower, but better at complex planning
+    main_agent = agents.Agent(
+        name="MainAgent",
+        instructions="""
+        You are a deep research agent and your goal is to conduct in-depth, multi-turn
+        research by breaking down complex queries, using the provided tools, and
+        synthesizing the information into a comprehensive report.
+
+        You have access to the following tools:
+        1. 'search_knowledgebase' - use this tool to search for information in a
+        knowledge base. The knowledge base reflects a subset of Wikipedia as
+        of May 2025.
+        2. 'get_web_search_grounded_response' - use this tool for current events,
+        news, fact-checking or when the information in the knowledge base is
+        not sufficient to answer the question.
+
+        Both tools will not return raw search results or the sources themselves.
+        Instead, they will return a concise summary of the key findings, along
+        with the sources used to generate the summary.
+
+        For best performance, divide complex queries into simpler sub-queries.
+        Before calling either tool, always explain your reasoning for doing so.
+
+        Note that the 'get_web_search_grounded_response' tool will expand the query
+        into multiple search queries and execute them. It will also return the
+        queries it executed. Do not repeat them.
+
+        **Routing Guidelines:**
+        - When answering a question, you should first try to use the 'search_knowledgebase'
+        tool, unless the question requires recent information after May 2025 or
+        has explicit recency cues.
+        - If either tool returns insufficient information for a given query, try
+        reformulating or using the other tool. You can call either tool multiple
+        times to get the information you need to answer the user's question.
+
+        **Guidelines for synthesis**
+        - After collecting results, write the final answer from your own synthesis.
+        - Add a "Sources" section listing unique sources, formatted as:
+          [1] Publisher - URL
+          [2] Wikipedia: <article title> (Section: <section title>
) + Order by first mention in your text. Every factual sentence in your final + response must map to at least one source. + - If web and knowledge base disagree, surface the disagreement and prefer sources + with newer publication dates. + - Do not invent URLs or sources. + - If both tools fail, say so and suggest 2–3 refined queries. + + Be sure to mention the sources in your response, including the URL if available, + and do not make up information. + """, + # Allow the planner agent to invoke the worker agent. + # The long context provided to the worker agent is hidden from the main agent. + tools=[ + kb_agent.as_tool( + tool_name="search_knowledgebase", + tool_description=( + "Search the knowledge base for a query and return a concise summary " + "of the key findings, along with the sources used to generate " + "the summary" + ), + ), + agents.function_tool( + gemini_grounding_tool.get_web_search_grounded_response, + name_override="search_web", + ), + ], + # a larger, more capable model for planning and reasoning over summaries + model=agents.OpenAIChatCompletionsModel( + model=planner_model, openai_client=client_manager.openai_client + ), + ) + + demo = gr.ChatInterface( + _main, + **COMMON_GRADIO_CONFIG, + examples=[ + [ + "At which university did the SVP Software Engineering" + " at Apple (as of June 2025) earn their engineering degree?" + ], + [ + "How does the annual growth in the 50th-percentile income " + "in the US compare with that in Canada?", + ], + [ + "Provide a complete list of all countries that have a population " + "over 100 million in 2026, that contain over 500 billion cubic meters " + "of internal fresh water for the year 2021, and have a mortality rate " + "less than the birth rate for the year 2021. The order of the list " + "should be based on the largest population size in 2026." + ], + ], + title="2.2.3: Multi-Agent Orchestrator-worker for Retrieval-Augmented Generation with Multiple Tools", + ) try: demo.launch(share=True) finally: - asyncio.run(_cleanup_clients()) + asyncio.run(client_manager.close()) diff --git a/src/2_frameworks/2_multi_agent/fan_out.py b/src/2_frameworks/2_multi_agent/fan_out.py index fd36bed..f189bac 100644 --- a/src/2_frameworks/2_multi_agent/fan_out.py +++ b/src/2_frameworks/2_multi_agent/fan_out.py @@ -29,23 +29,13 @@ from src.utils import set_up_logging, setup_langfuse_tracer from src.utils.async_utils import gather_with_progress, rate_limited +from src.utils.client_manager import AsyncClientManager from src.utils.langfuse.shared_client import langfuse_client -AGENT_LLM_NAMES = { - "worker": "gemini-2.5-flash", # less expensive, - "reviewer": "gemini-2.5-pro", # more expenive, better at reasoning -} - MAX_CONCURRENCY = {"worker": 50, "reviewer": 50} MAX_GENERATED_TOKENS = {"worker": 16384, "reviewer": 32768} -parser = argparse.ArgumentParser() -parser.add_argument("--source_dataset", required=True) -parser.add_argument( - "--num_rows", default=-1, type=int, help="Set to -1 to select all rows." -) -parser.add_argument("--output_report", default="report.md") Document = dict[str, Any] @@ -246,26 +236,6 @@ def group_conflicts( ] -async_openai_client = openai.AsyncOpenAI() - -worker_agent = agents.Agent( - "Conflict-detection Agent", - instructions=( - "Identify conflicting information (if any) in and between these documents. " - "Be sure to show your reasoning, even if there is no conflict. " - "If no conflict is found between the two documents, use an empty list." 
- ), - output_type=ConflictSummary, - model=agents.OpenAIChatCompletionsModel( - model=AGENT_LLM_NAMES["worker"], openai_client=async_openai_client - ), - model_settings=agents.ModelSettings( - reasoning=openai.types.Reasoning(effort="high", generate_summary="detailed"), - max_tokens=MAX_GENERATED_TOKENS["worker"], - ), -) - - async def process_document_pair(document_pair: DocumentPair) -> ConflictSummary | None: """Process one document pair. @@ -306,23 +276,6 @@ async def process_fan_out( ] -conflict_review_agent = agents.Agent( - "Conflict-review agent", - instructions=( - "Given the documents suggested to be in conflict with information " - "in the current document, analyze whether the suggestions are valid." - ), - output_type=ConflictReview, - model=agents.OpenAIChatCompletionsModel( - model=AGENT_LLM_NAMES["reviewer"], openai_client=async_openai_client - ), - model_settings=agents.ModelSettings( - reasoning=openai.types.Reasoning(effort="high", generate_summary="detailed"), - max_tokens=MAX_GENERATED_TOKENS["reviewer"], - ), -) - - async def process_one_review( conflicted_document: ConflictedDocument, ) -> ConflictReview | None: @@ -371,10 +324,58 @@ async def process_conflict_reviews( if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--source_dataset", required=True) + parser.add_argument( + "--num_rows", default=-1, type=int, help="Set to -1 to select all rows." + ) + parser.add_argument("--output_report", default="report.md") args = parser.parse_args() + set_up_logging() setup_langfuse_tracer() + client_manager = AsyncClientManager() + + worker_agent = agents.Agent( + "Conflict-detection Agent", + instructions=( + "Identify conflicting information (if any) in and between these documents. " + "Be sure to show your reasoning, even if there is no conflict. " + "If no conflict is found between the two documents, use an empty list." + ), + output_type=ConflictSummary, + model=agents.OpenAIChatCompletionsModel( + model=client_manager.configs.default_worker_model, + openai_client=client_manager.openai_client, + ), + model_settings=agents.ModelSettings( + reasoning=openai.types.Reasoning( + effort="high", generate_summary="detailed" + ), + max_tokens=MAX_GENERATED_TOKENS["worker"], + ), + ) + + conflict_review_agent = agents.Agent( + "Conflict-review agent", + instructions=( + "Given the documents suggested to be in conflict with information " + "in the current document, analyze whether the suggestions are valid." 
+ ), + output_type=ConflictReview, + model=agents.OpenAIChatCompletionsModel( + model=client_manager.configs.default_planner_model, + openai_client=client_manager.openai_client, + ), + model_settings=agents.ModelSettings( + reasoning=openai.types.Reasoning( + effort="high", generate_summary="detailed" + ), + max_tokens=MAX_GENERATED_TOKENS["reviewer"], + ), + ) + dataset_dict = datasets.load_dataset(args.source_dataset) assert isinstance(dataset_dict, datasets.DatasetDict) documents = list(dataset_dict["train"])[: args.num_rows] diff --git a/src/2_frameworks/2_multi_agent/verbose.py b/src/2_frameworks/2_multi_agent/verbose.py index 25efe6a..16e3aa4 100644 --- a/src/2_frameworks/2_multi_agent/verbose.py +++ b/src/2_frameworks/2_multi_agent/verbose.py @@ -8,32 +8,24 @@ """ import asyncio -import contextlib -import logging -import signal -import sys +from typing import Any, AsyncGenerator import agents import gradio as gr from dotenv import load_dotenv from gradio.components.chatbot import ChatMessage -from openai import AsyncOpenAI from pydantic import BaseModel from src.utils import ( - AsyncWeaviateKnowledgeBase, - Configs, - get_weaviate_async_client, oai_agent_items_to_gradio_messages, pretty_print, setup_langfuse_tracer, ) +from src.utils.agent_session import get_or_create_session +from src.utils.client_manager import AsyncClientManager +from src.utils.gradio import COMMON_GRADIO_CONFIG from src.utils.langfuse.shared_client import langfuse_client - - -load_dotenv(verbose=True) - -logging.basicConfig(level=logging.INFO) +from src.utils.logging import set_up_logging PLANNER_INSTRUCTIONS = """\ @@ -94,12 +86,14 @@ class ResearchReport(BaseModel): full_report: str -async def _create_search_plan(planner_agent: agents.Agent, query: str) -> SearchPlan: +async def _create_search_plan( + planner_agent: agents.Agent, query: str, session: agents.Session | None = None +) -> SearchPlan: """Create a search plan using the planner agent.""" with langfuse_client.start_as_current_span( name="create_search_plan", input=query ) as planner_span: - response = await agents.Runner.run(planner_agent, input=query) + response = await agents.Runner.run(planner_agent, input=query, session=session) search_plan = response.final_output_as(SearchPlan) planner_span.update(output=search_plan) @@ -107,7 +101,10 @@ async def _create_search_plan(planner_agent: agents.Agent, query: str) -> Search async def _generate_final_report( - writer_agent: agents.Agent, search_results: list[str], query: str + writer_agent: agents.Agent, + search_results: list[str], + query: str, + session: agents.Session | None = None, ) -> agents.RunResult: """Generate the final report using the writer agent.""" input_data = f"Original question: {query}\n" @@ -118,127 +115,160 @@ async def _generate_final_report( with langfuse_client.start_as_current_span( name="generate_final_report", input=input_data ) as writer_span: - response = await agents.Runner.run(writer_agent, input=input_data) + response = await agents.Runner.run( + writer_agent, input=input_data, session=session + ) writer_span.update(output=response.final_output) return response -async def _cleanup_clients() -> None: - """Close async clients.""" - await async_weaviate_client.close() - await async_openai_client.close() - +async def _main( + query: str, history: list[ChatMessage], session_state: dict[str, Any] +) -> AsyncGenerator[list[ChatMessage], Any]: + """Run multi-agent planner-researcher setup.""" + # Initialize list of chat messages for a single turn + turn_messages: 
list[ChatMessage] = []

-def _handle_sigint(signum: int, frame: object) -> None:
-    """Handle SIGINT signal to gracefully shutdown."""
-    with contextlib.suppress(Exception):
-        asyncio.get_event_loop().run_until_complete(_cleanup_clients())
-        sys.exit(0)
-
-
-async def _main(question: str, gr_messages: list[ChatMessage]):
-    planner_agent = agents.Agent(
-        name="Planner Agent",
-        instructions=PLANNER_INSTRUCTIONS,
-        model=agents.OpenAIChatCompletionsModel(
-            model=configs.default_planner_model, openai_client=async_openai_client
-        ),
-        output_type=SearchPlan,
-    )
-    research_agent = agents.Agent(
-        name="Research Agent",
-        instructions=RESEARCHER_INSTRUCTIONS,
-        tools=[agents.function_tool(async_knowledgebase.search_knowledgebase)],
-        model=agents.OpenAIChatCompletionsModel(
-            model=configs.default_worker_model,
-            openai_client=async_openai_client,
-        ),
-        model_settings=agents.ModelSettings(tool_choice="required"),
-    )
-    writer_agent = agents.Agent(
-        name="Writer Agent",
-        instructions=WRITER_INSTRUCTIONS,
-        model=agents.OpenAIChatCompletionsModel(
-            model=configs.default_planner_model, openai_client=async_openai_client
-        ),
-        output_type=ResearchReport,
-    )
-
-    gr_messages.append(ChatMessage(role="user", content=question))
-    yield gr_messages
+    # Construct an in-memory SQLite session for the agent to maintain
+    # conversation history across multiple turns of a chat
+    # This makes it possible to ask follow-up questions that refer to
+    # previous turns in the conversation
+    session = get_or_create_session(history, session_state)

     with langfuse_client.start_as_current_span(
-        name="Multi-Agent-Trace", input=question
+        name="Multi-Agent-Trace", input=query
     ) as agents_span:
         # Create a search plan
-        search_plan = await _create_search_plan(planner_agent, question)
-        gr_messages.append(
-            ChatMessage(role="assistant", content=f"Search Plan:\n{search_plan}")
+        search_plan = await _create_search_plan(planner_agent, query, session=session)
+        turn_messages.append(
+            ChatMessage(
+                role="assistant",
+                content="",
+                metadata={"title": "**Search Plan**", "id": "search-plan"},
+            )
         )
-        pretty_print(gr_messages)
-        yield gr_messages
+        for step in search_plan.search_steps:
+            turn_messages.append(
+                ChatMessage(
+                    role="assistant",
+                    content=(f"_Reasoning:_ {step.reasoning}"),
+                    metadata={
+                        "title": f"**Search Term:** {step.search_term}",
+                        "parent_id": "search-plan",
+                        "status": "done",  # This makes it collapsed by default
+                    },
+                )
+            )
+        pretty_print(turn_messages)
+        yield turn_messages

         # Execute the search plan
+        # NOTE: searches are done sequentially here for simplicity.
+        # TODO: As an exercise, try to parallelize the execution of the search steps.
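One possible shape for the exercise flagged in the TODO above, as a sketch rather than part of this patch: `run_steps_concurrently` is a made-up helper, and it deliberately drops the shared session, since a single `SQLiteSession` should not be written to by several concurrent runs.

```python
# Hypothetical sketch for the TODO above; not part of this diff.
# Runs the research agent over all search steps concurrently.
import asyncio

import agents


async def run_steps_concurrently(
    research_agent: agents.Agent, search_steps: list
) -> list[str]:
    async def run_one(step) -> str:
        # No shared session here: a single SQLiteSession is not safe to
        # write to from several concurrent runs.
        response = await agents.Runner.run(research_agent, input=step.search_term)
        return response.final_output

    # gather() preserves the order of search_steps in its results.
    return await asyncio.gather(*(run_one(step) for step in search_steps))
```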
search_results = [] for step in search_plan.search_steps: with langfuse_client.start_as_current_span( name="execute_search_step", input=step.search_term ) as search_span: response = await agents.Runner.run( - research_agent, input=step.search_term + research_agent, input=step.search_term, session=session ) search_result: str = response.final_output search_span.update(output=search_result) search_results.append(search_result) - gr_messages += oai_agent_items_to_gradio_messages(response.new_items) - yield gr_messages + turn_messages += oai_agent_items_to_gradio_messages( + response.new_items, is_final_output=False + ) + yield turn_messages # Generate the final report writer_agent_response = await _generate_final_report( - writer_agent, search_results, question + writer_agent, search_results, query, session=session ) agents_span.update(output=writer_agent_response.final_output) report = writer_agent_response.final_output_as(ResearchReport) - gr_messages.append( + turn_messages.append( ChatMessage( role="assistant", - content=f"Summary:\n{report.summary}\n\nFull Report:\n{report.full_report}", + content=f"## Summary\n{report.summary}\n\n## Full Report\n{report.full_report}", ) ) - pretty_print(gr_messages) - yield gr_messages + pretty_print(turn_messages) + yield turn_messages if __name__ == "__main__": - configs = Configs() - async_weaviate_client = get_weaviate_async_client( - http_host=configs.weaviate_http_host, - http_port=configs.weaviate_http_port, - http_secure=configs.weaviate_http_secure, - grpc_host=configs.weaviate_grpc_host, - grpc_port=configs.weaviate_grpc_port, - grpc_secure=configs.weaviate_grpc_secure, - api_key=configs.weaviate_api_key, - ) - async_knowledgebase = AsyncWeaviateKnowledgeBase( - async_weaviate_client, - collection_name=configs.weaviate_collection_name, - ) + load_dotenv(verbose=True) + + # Set logging level and suppress some noisy logs from dependencies + set_up_logging() - async_openai_client = AsyncOpenAI() + # Set up LangFuse for tracing setup_langfuse_tracer() - with gr.Blocks(title="OAI Agent SDK - Multi-agent") as app: - chatbot = gr.Chatbot(type="messages", label="Agent", height=600) - chat_message = gr.Textbox(lines=1, label="Ask a question") - chat_message.submit(_main, [chat_message, chatbot], [chatbot]) + # Initialize client manager + # This class initializes the OpenAI and Weaviate async clients, as well as the + # Weaviate knowledge base tool. The initialization is done once when the clients + # are first accessed, and the clients are reused for subsequent calls. 
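Because this client-manager comment recurs before every entry point in the patch, a compact usage sketch of the lifecycle it describes may help. The `demo` wrapper and query string are made up; the attributes used (`knowledgebase`, `search_knowledgebase`, `close`) all appear in `src/utils/client_manager.py` later in this diff.

```python
# Minimal usage sketch of AsyncClientManager (hypothetical demo wrapper).
# Clients are created lazily on first attribute access and closed once
# on shutdown, which is what lets Gradio hot-reload without leaking loops.
import asyncio

from src.utils.client_manager import AsyncClientManager


async def demo() -> None:
    manager = AsyncClientManager()
    try:
        # First access creates the Weaviate client and the knowledge base.
        results = await manager.knowledgebase.search_knowledgebase("example query")
        print(results)
    finally:
        # Closes only whatever was actually initialized.
        await manager.close()


asyncio.run(demo())
```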
+ client_manager = AsyncClientManager() + + # Use smaller, faster model for focused search tasks + worker_model = client_manager.configs.default_worker_model + # Use larger, more capable model for complex planning and reasoning + planner_model = client_manager.configs.default_planner_model + + planner_agent = agents.Agent( + name="Planner Agent", + instructions=PLANNER_INSTRUCTIONS, + model=agents.OpenAIChatCompletionsModel( + model=planner_model, + openai_client=client_manager.openai_client, + ), + output_type=SearchPlan, + ) + + research_agent = agents.Agent( + name="Research Agent", + instructions=RESEARCHER_INSTRUCTIONS, + tools=[agents.function_tool(client_manager.knowledgebase.search_knowledgebase)], + model=agents.OpenAIChatCompletionsModel( + model=worker_model, + openai_client=client_manager.openai_client, + ), + # Force the agent to use the search tool for every query + model_settings=agents.ModelSettings(tool_choice="required"), + ) + + writer_agent = agents.Agent( + name="Writer Agent", + instructions=WRITER_INSTRUCTIONS, + model=agents.OpenAIChatCompletionsModel( + model=planner_model, # Stronger model for complex synthesis + openai_client=client_manager.openai_client, + ), + output_type=ResearchReport, + ) - signal.signal(signal.SIGINT, _handle_sigint) + demo = gr.ChatInterface( + _main, + **COMMON_GRADIO_CONFIG, + examples=[ + [ + "At which university did the SVP Software Engineering" + " at Apple (as of June 2025) earn their engineering degree?" + ], + [ + "How does the annual growth in the 50th-percentile income " + "in the US compare with that in Canada?", + ], + ], + title="2.2.1: Plan-and-Execute Multi-Agent System for Retrieval-Augmented Generation", + ) try: - app.launch(server_name="0.0.0.0") + demo.launch(share=True) finally: - asyncio.run(_cleanup_clients()) + asyncio.run(client_manager.close()) diff --git a/src/2_frameworks/3_code_interpreter/app.py b/src/2_frameworks/3_code_interpreter/app.py index 3492f4b..0a97522 100644 --- a/src/2_frameworks/3_code_interpreter/app.py +++ b/src/2_frameworks/3_code_interpreter/app.py @@ -5,29 +5,27 @@ You will need your E2B API Key. """ -import os +import asyncio from pathlib import Path +from typing import Any, AsyncGenerator import agents import gradio as gr from dotenv import load_dotenv from gradio.components.chatbot import ChatMessage -from openai import AsyncOpenAI from src.utils import ( CodeInterpreter, oai_agent_stream_to_gradio_messages, pretty_print, set_up_logging, - setup_langfuse_tracer, ) +from src.utils.agent_session import get_or_create_session +from src.utils.client_manager import AsyncClientManager +from src.utils.gradio import COMMON_GRADIO_CONFIG from src.utils.langfuse.shared_client import langfuse_client -load_dotenv(verbose=True) - -set_up_logging() - CODE_INTERPRETER_INSTRUCTIONS = """\ The `code_interpreter` tool executes Python commands. \ Please note that data is not persisted. Each time you invoke this tool, \ @@ -43,17 +41,59 @@ but you won't be able to install packages. 
""" -async_openai_client = AsyncOpenAI() -code_interpreter = CodeInterpreter( - local_files=[ - Path("sandbox_content/"), - Path("tests/tool_tests/example_files/example_a.csv"), - ] -) +async def _main( + query: str, history: list[ChatMessage], session_state: dict[str, Any] +) -> AsyncGenerator[list[ChatMessage], Any]: + # Initialize list of chat messages for a single turn + turn_messages: list[ChatMessage] = [] + + # Construct an in-memory SQLite session for the agent to maintain + # conversation history across multiple turns of a chat + # This makes it possible to ask follow-up questions that refer to + # previous turns in the conversation + session = get_or_create_session(history, session_state) -async def _main(question: str, gr_messages: list[ChatMessage]): - setup_langfuse_tracer() + with langfuse_client.start_as_current_span(name="Agents-SDK-Trace") as span: + span.update(input=query) + + # Run the agent in streaming mode to get and display intermediate outputs + result_stream = agents.Runner.run_streamed( + main_agent, input=query, session=session + ) + + async for _item in result_stream.stream_events(): + turn_messages += oai_agent_stream_to_gradio_messages(_item) + if len(turn_messages) > 0: + yield turn_messages + + span.update(output=result_stream.final_output) + + pretty_print(turn_messages) + yield turn_messages + + # Clear the turn messages after yielding to prepare for the next turn + turn_messages.clear() + + +if __name__ == "__main__": + load_dotenv(verbose=True) + + set_up_logging() + + # Initialize client manager + # This class initializes the OpenAI and Weaviate async clients, as well as the + # Weaviate knowledge base tool. The initialization is done once when the clients + # are first accessed, and the clients are reused for subsequent calls. + client_manager = AsyncClientManager() + + # Initialize code interpreter with local files that will be available to the agent + code_interpreter = CodeInterpreter( + local_files=[ + Path("sandbox_content/"), + Path("tests/tool_tests/example_files/example_a.csv"), + ] + ) main_agent = agents.Agent( name="Data Analysis Agent", @@ -65,37 +105,23 @@ async def _main(question: str, gr_messages: list[ChatMessage]): ) ], model=agents.OpenAIChatCompletionsModel( - model=os.getenv("DEFAULT_PLANNER_MODEL", "gemini-2.5-flash"), - openai_client=async_openai_client, + model=client_manager.configs.default_planner_model, + openai_client=client_manager.openai_client, ), ) - with langfuse_client.start_as_current_span(name="Agents-SDK-Trace") as span: - span.update(input=question) - - result_stream = agents.Runner.run_streamed(main_agent, input=question) - async for _item in result_stream.stream_events(): - gr_messages += oai_agent_stream_to_gradio_messages(_item) - if len(gr_messages) > 0: - yield gr_messages - - span.update(output=result_stream.final_output) - - pretty_print(gr_messages) - yield gr_messages - - -demo = gr.ChatInterface( - _main, - title="2.1 OAI Agent SDK ReAct + LangFuse Code Interpreter", - type="messages", - examples=[ - "What is the sum of the column `x` in this example_a.csv?", - "What is the sum of the column `y` in this example_a.csv?", - "Create a linear best-fit line for the data in example_a.csv.", - ], -) - + demo = gr.ChatInterface( + _main, + **COMMON_GRADIO_CONFIG, + examples=[ + ["What is the sum of the column `x` in this example_a.csv?"], + ["What is the sum of the column `y` in this example_a.csv?"], + ["Create a linear best-fit line for the data in example_a.csv."], + ], + title="2.3. 
OAI Agent SDK ReAct + Code Interpreter Tool", + ) -if __name__ == "__main__": - demo.launch(share=True) + try: + demo.launch(share=True) + finally: + asyncio.run(client_manager.close()) diff --git a/src/2_frameworks/4_mcp/app.py b/src/2_frameworks/4_mcp/app.py index 1ede0b1..0f4a92f 100644 --- a/src/2_frameworks/4_mcp/app.py +++ b/src/2_frameworks/4_mcp/app.py @@ -4,52 +4,38 @@ """ import asyncio -import contextlib -import signal import subprocess -import sys +from typing import Any, AsyncGenerator import agents import gradio as gr from agents.mcp import MCPServerStdio, create_static_tool_filter from dotenv import load_dotenv from gradio.components.chatbot import ChatMessage -from openai import AsyncOpenAI from src.utils import ( - Configs, oai_agent_stream_to_gradio_messages, pretty_print, set_up_logging, - setup_langfuse_tracer, ) +from src.utils.agent_session import get_or_create_session +from src.utils.client_manager import AsyncClientManager +from src.utils.gradio import COMMON_GRADIO_CONFIG from src.utils.langfuse.shared_client import langfuse_client -load_dotenv(verbose=True) - -set_up_logging() - - -configs = Configs() -async_openai_client = AsyncOpenAI() - - -async def _cleanup_clients() -> None: - """Close async clients.""" - await async_openai_client.close() - - -def _handle_sigint(signum: int, frame: object) -> None: - """Handle SIGINT signal to gracefully shutdown.""" - with contextlib.suppress(Exception): - asyncio.get_event_loop().run_until_complete(_cleanup_clients()) - sys.exit(0) - - -async def _main(question: str, gr_messages: list[ChatMessage]): +async def _main( + query: str, history: list[ChatMessage], session_state: dict[str, Any] +) -> AsyncGenerator[list[ChatMessage], Any]: """Initialize MCP Git server and run the agent.""" - setup_langfuse_tracer() + # Initialize list of chat messages for a single turn + turn_messages: list[ChatMessage] = [] + + # Construct an in-memory SQLite session for the agent to maintain + # conversation history across multiple turns of a chat + # This makes it possible to ask follow-up questions that refer to + # previous turns in the conversation + session = get_or_create_session(history, session_state) # Get the absolute path to the current git repository, regardless of where # the script is run from @@ -58,7 +44,7 @@ async def _main(question: str, gr_messages: list[ChatMessage]): ).strip() with langfuse_client.start_as_current_span(name="Agents-SDK-Trace") as span: - span.update(input=question) + span.update(input=query) async with MCPServerStdio( name="Git server", @@ -75,40 +61,50 @@ async def _main(question: str, gr_messages: list[ChatMessage]): instructions=f"Answer questions about the git repository at {repo_path}, use that for repo_path", mcp_servers=[mcp_server], model=agents.OpenAIChatCompletionsModel( - model=configs.default_planner_model, - openai_client=async_openai_client, + model=client_manager.configs.default_planner_model, + openai_client=client_manager.openai_client, ), ) - result_stream = agents.Runner.run_streamed(agent, input=question) + result_stream = agents.Runner.run_streamed( + agent, input=query, session=session + ) async for _item in result_stream.stream_events(): - gr_messages += oai_agent_stream_to_gradio_messages(_item) - if len(gr_messages) > 0: - yield gr_messages + turn_messages += oai_agent_stream_to_gradio_messages(_item) + if len(turn_messages) > 0: + yield turn_messages span.update(output=result_stream.final_output) - pretty_print(gr_messages) - yield gr_messages + pretty_print(turn_messages) + yield 
turn_messages - -demo = gr.ChatInterface( - _main, - title="2.4 OAI Agent SDK MCP", - type="messages", - examples=[ - "Summarize the last change in the repository.", - "How many branches currently exist on the remote?", - ], -) + # Clear the turn messages after yielding to prepare for the next turn + turn_messages.clear() if __name__ == "__main__": - configs = Configs() - - signal.signal(signal.SIGINT, _handle_sigint) + load_dotenv(verbose=True) + + set_up_logging() + + # Initialize client manager + # This class initializes the OpenAI and Weaviate async clients, as well as the + # Weaviate knowledge base tool. The initialization is done once when the clients + # are first accessed, and the clients are reused for subsequent calls. + client_manager = AsyncClientManager() + + demo = gr.ChatInterface( + _main, + **COMMON_GRADIO_CONFIG, + examples=[ + ["Summarize the last change in the repository."], + ["How many branches currently exist on the remote?"], + ], + title="2.4 OAI Agent SDK + Git MCP Server", + ) try: demo.launch(share=True) finally: - asyncio.run(_cleanup_clients()) + asyncio.run(client_manager.close()) diff --git a/src/3_evals/1_llm_judge/run_eval.py b/src/3_evals/1_llm_judge/run_eval.py index 82ce928..7687103 100644 --- a/src/3_evals/1_llm_judge/run_eval.py +++ b/src/3_evals/1_llm_judge/run_eval.py @@ -7,17 +7,14 @@ import pydantic from dotenv import load_dotenv from langfuse._client.datasets import DatasetItemClient -from openai import AsyncOpenAI from rich.progress import track from src.utils import ( - AsyncWeaviateKnowledgeBase, - Configs, gather_with_progress, - get_weaviate_async_client, set_up_logging, setup_langfuse_tracer, ) +from src.utils.client_manager import AsyncClientManager from src.utils.langfuse.shared_client import flush_langfuse, langfuse_client @@ -109,7 +106,8 @@ async def run_evaluator_agent(evaluator_query: EvaluatorQuery) -> EvaluatorRespo instructions=EVALUATOR_INSTRUCTIONS, output_type=EvaluatorResponse, model=agents.OpenAIChatCompletionsModel( - model=configs.default_planner_model, openai_client=async_openai_client + model=client_manager.configs.default_planner_model, + openai_client=client_manager.openai_client, ), ) @@ -149,55 +147,15 @@ async def run_and_evaluate( return traced_response, evaluator_response -parser = argparse.ArgumentParser() -parser.add_argument("--langfuse_dataset_name", required=True) -parser.add_argument("--run_name", required=True) -parser.add_argument("--limit", type=int) - - -if __name__ == "__main__": - args = parser.parse_args() - - lf_dataset_items = langfuse_client.get_dataset(args.langfuse_dataset_name).items - if args.limit is not None: - lf_dataset_items = lf_dataset_items[: args.limit] - - configs = Configs() - async_weaviate_client = get_weaviate_async_client( - http_host=configs.weaviate_http_host, - http_port=configs.weaviate_http_port, - http_secure=configs.weaviate_http_secure, - grpc_host=configs.weaviate_grpc_host, - grpc_port=configs.weaviate_grpc_port, - grpc_secure=configs.weaviate_grpc_secure, - api_key=configs.weaviate_api_key, - ) - async_openai_client = AsyncOpenAI() - async_knowledgebase = AsyncWeaviateKnowledgeBase( - async_weaviate_client, - collection_name=configs.weaviate_collection_name, - ) - - tracer = setup_langfuse_tracer() - - main_agent = agents.Agent( - name="Wikipedia Agent", - instructions=SYSTEM_MESSAGE, - tools=[agents.function_tool(async_knowledgebase.search_knowledgebase)], - model=agents.OpenAIChatCompletionsModel( - model=configs.default_planner_model, 
openai_client=async_openai_client - ), - ) +async def _main() -> None: coros = [ run_and_evaluate( - run_name=args.run_name, - main_agent=main_agent, - lf_dataset_item=_item, + run_name=args.run_name, main_agent=main_agent, lf_dataset_item=_item ) for _item in lf_dataset_items ] - results = asyncio.run( - gather_with_progress(coros, description="Running agent and evaluating") + results = await gather_with_progress( + coros, description="Running agent and evaluating" ) for _traced_response, _eval_output in track( @@ -213,5 +171,32 @@ async def run_and_evaluate( ) flush_langfuse() + await client_manager.close() + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--langfuse_dataset_name", required=True) + parser.add_argument("--run_name", required=True) + parser.add_argument("--limit", type=int) + args = parser.parse_args() + + lf_dataset_items = langfuse_client.get_dataset(args.langfuse_dataset_name).items + if args.limit is not None: + lf_dataset_items = lf_dataset_items[: args.limit] + + client_manager = AsyncClientManager() + + setup_langfuse_tracer() + + main_agent = agents.Agent( + name="Wikipedia Agent", + instructions=SYSTEM_MESSAGE, + tools=[agents.function_tool(client_manager.knowledgebase.search_knowledgebase)], + model=agents.OpenAIChatCompletionsModel( + model=client_manager.configs.default_planner_model, + openai_client=client_manager.openai_client, + ), + ) - asyncio.run(async_weaviate_client.close()) + asyncio.run(_main()) diff --git a/src/3_evals/2_synthetic_data/synthesize_data.py b/src/3_evals/2_synthetic_data/synthesize_data.py index c1c1839..74a9799 100644 --- a/src/3_evals/2_synthetic_data/synthesize_data.py +++ b/src/3_evals/2_synthetic_data/synthesize_data.py @@ -14,27 +14,21 @@ import agents import pydantic from dotenv import load_dotenv -from openai import AsyncOpenAI from rich.progress import track from src.utils import ( - AsyncWeaviateKnowledgeBase, - Configs, gather_with_progress, - get_weaviate_async_client, pretty_print, rate_limited, set_up_logging, setup_langfuse_tracer, ) +from src.utils.client_manager import AsyncClientManager from src.utils.data import get_dataset, get_dataset_url_hash from src.utils.langfuse.shared_client import langfuse_client from src.utils.tools.news_events import NewsEvent, get_news_events -load_dotenv(verbose=True) -set_up_logging() - SYSTEM_MESSAGE = """\ Example questions: \ {example_questions} @@ -96,7 +90,8 @@ async def generate_synthetic_test_cases( instructions="Extract the structured output from the given text.", output_type=list[_SyntheticTestCase], model=agents.OpenAIChatCompletionsModel( - model=configs.default_worker_model, openai_client=async_openai_client + model=client_manager.configs.default_worker_model, + openai_client=client_manager.openai_client, ), ) @@ -118,28 +113,15 @@ async def generate_synthetic_test_cases( if __name__ == "__main__": args = parser.parse_args() - configs = Configs() - async_weaviate_client = get_weaviate_async_client( - http_host=configs.weaviate_http_host, - http_port=configs.weaviate_http_port, - http_secure=configs.weaviate_http_secure, - grpc_host=configs.weaviate_grpc_host, - grpc_port=configs.weaviate_grpc_port, - grpc_secure=configs.weaviate_grpc_secure, - api_key=configs.weaviate_api_key, - ) - async_knowledgebase = AsyncWeaviateKnowledgeBase( - async_weaviate_client, - collection_name=configs.weaviate_collection_name, - max_concurrency=args.max_concurrency, - ) + load_dotenv(verbose=True) + set_up_logging() setup_langfuse_tracer() generator = 
random.Random(0) dataset_name_hash = get_dataset_url_hash(args.langfuse_dataset_name) - async_openai_client = AsyncOpenAI() + client_manager = AsyncClientManager() # Create langfuse dataset and upload. langfuse_client.create_dataset( @@ -168,9 +150,10 @@ async def generate_synthetic_test_cases( json_schema=_SyntheticTestCases.model_json_schema(), ), # HINT: replace this tool with your own knowledge base search tool. - tools=[agents.function_tool(async_knowledgebase.search_knowledgebase)], + tools=[agents.function_tool(client_manager.knowledgebase.search_knowledgebase)], model=agents.OpenAIChatCompletionsModel( - model=configs.default_planner_model, openai_client=async_openai_client + model=client_manager.configs.default_planner_model, + openai_client=client_manager.openai_client, ), ) diff --git a/src/3_evals/2_synthetic_data/synthesize_data_e2b.py b/src/3_evals/2_synthetic_data/synthesize_data_e2b.py index 672752c..d01f528 100644 --- a/src/3_evals/2_synthetic_data/synthesize_data_e2b.py +++ b/src/3_evals/2_synthetic_data/synthesize_data_e2b.py @@ -25,26 +25,21 @@ import agents import pydantic from dotenv import load_dotenv -from openai import AsyncOpenAI from rich.progress import track from src.utils import ( CodeInterpreter, - Configs, gather_with_progress, pretty_print, rate_limited, set_up_logging, setup_langfuse_tracer, ) +from src.utils.client_manager import AsyncClientManager from src.utils.data import get_dataset_url_hash from src.utils.langfuse.shared_client import langfuse_client -load_dotenv(verbose=True) -set_up_logging() -async_openai_client = AsyncOpenAI() - SYSTEM_MESSAGE = """\ Example questions: \ {example_questions} @@ -58,13 +53,6 @@ so each "question" is self-contained and can be answered on its own. """ -parser = argparse.ArgumentParser() -parser.add_argument("--langfuse_dataset_name", required=True) -parser.add_argument("--limit", type=int, default=18) -parser.add_argument("--max_concurrency", type=int, default=3) - -configs = Configs() - class _Citation(pydantic.BaseModel): """Represents one cited source.""" @@ -81,51 +69,6 @@ class _SyntheticTestCase(pydantic.BaseModel): citations: list[_Citation] -code_interpreter = CodeInterpreter( - template_name=configs.default_code_interpreter_template, - local_files=[ - Path("sandbox_content/"), - Path("tests/tool_tests/example_files/example_a.csv"), - ], -) - -example_questions = [ - _SyntheticTestCase( - question="How many airports are listed in Airlines.sqlite?", - expected_answer="104", - citations=[ - _Citation( - title="Airlines.sqlite", - section="SELECT COUNT(DISTINCT airport_code) FROM airports_data;", - ) - ], - ), - _SyntheticTestCase( - question="What unique aircraft codes are listed in Airlines.sqlite?", - expected_answer="773,763,SU9,320,321,319,733,CN1,CR2", - citations=[ - _Citation( - title="Airlines.sqlite", - section="SELECT DISTINCT aircraft_code FROM aircrafts_data;", - ) - ], - ), -] -example_questions_str = pretty_print(example_questions) - -test_case_generator_agent = agents.Agent( - name="Test Case Generator Agent", - instructions=SYSTEM_MESSAGE.format( - example_questions=example_questions_str, - json_schema=_SyntheticTestCase.model_json_schema(), - ), - tools=[agents.function_tool(code_interpreter.run_code)], - model=agents.OpenAIChatCompletionsModel( - model=configs.default_planner_model, openai_client=async_openai_client - ), -) - - async def generate_synthetic_test_cases( test_case_generator_agent: agents.Agent, ) -> list[_SyntheticTestCase] | None: @@ -145,7 +88,8 @@ async def 
generate_synthetic_test_cases( instructions="Extract the structured output from the given text.", output_type=list[_SyntheticTestCase], model=agents.OpenAIChatCompletionsModel( - model=configs.default_planner_model, openai_client=async_openai_client + model=client_manager.configs.default_planner_model, + openai_client=client_manager.openai_client, ), ) @@ -168,9 +112,64 @@ async def generate_synthetic_test_cases( if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--langfuse_dataset_name", required=True) + parser.add_argument("--limit", type=int, default=18) + parser.add_argument("--max_concurrency", type=int, default=3) args = parser.parse_args() + load_dotenv(verbose=True) + + set_up_logging() + + client_manager = AsyncClientManager() setup_langfuse_tracer() + + code_interpreter = CodeInterpreter( + template_name=client_manager.configs.default_code_interpreter_template, + local_files=[ + Path("sandbox_content/"), + Path("tests/tool_tests/example_files/example_a.csv"), + ], + ) + + example_questions = [ + _SyntheticTestCase( + question="How many airports are listed in Airlines.sqlite?", + expected_answer="104", + citations=[ + _Citation( + title="Airlines.sqlite", + section="SELECT COUNT(DISTINCT airport_code) FROM airports_data;", + ) + ], + ), + _SyntheticTestCase( + question="What unique aircraft codes are listed in Airlines.sqlite?", + expected_answer="773,763,SU9,320,321,319,733,CN1,CR2", + citations=[ + _Citation( + title="Airlines.sqlite", + section="SELECT DISTINCT aircraft_code FROM aircrafts_data;", + ) + ], + ), + ] + example_questions_str = pretty_print(example_questions) + + test_case_generator_agent = agents.Agent( + name="Test Case Generator Agent", + instructions=SYSTEM_MESSAGE.format( + example_questions=example_questions_str, + json_schema=_SyntheticTestCase.model_json_schema(), + ), + tools=[agents.function_tool(code_interpreter.run_code)], + model=agents.OpenAIChatCompletionsModel( + model=client_manager.configs.default_planner_model, + openai_client=client_manager.openai_client, + ), + ) + generator = random.Random(0) dataset_name_hash = get_dataset_url_hash(args.langfuse_dataset_name) diff --git a/src/utils/__init__.py b/src/utils/__init__.py index dc2e367..3e54184 100644 --- a/src/utils/__init__.py +++ b/src/utils/__init__.py @@ -1,6 +1,7 @@ """Shared toolings for reference implementations.""" from .async_utils import gather_with_progress, rate_limited +from .client_manager import AsyncClientManager from .data.batching import create_batches from .env_vars import Configs from .gradio.messages import ( diff --git a/src/utils/agent_session.py b/src/utils/agent_session.py new file mode 100644 index 0000000..c4292cf --- /dev/null +++ b/src/utils/agent_session.py @@ -0,0 +1,19 @@ +"""Session management utilities for agent conversations.""" + +import uuid +from typing import Any + +import agents +from gradio.components.chatbot import ChatMessage + + +def get_or_create_session( + history: list[ChatMessage], session_state: dict[str, Any] +) -> agents.SQLiteSession: + """Get existing session or create a new one for conversation persistence.""" + if len(history) == 0: + session = agents.SQLiteSession(session_id=str(uuid.uuid4())) + session_state["session"] = session + else: + session = session_state["session"] + return session diff --git a/src/utils/client_manager.py b/src/utils/client_manager.py new file mode 100644 index 0000000..b2a5b27 --- /dev/null +++ b/src/utils/client_manager.py @@ -0,0 +1,94 @@ +"""Async client lifecycle manager for 
Gradio applications. + +Provides idempotent initialization and proper cleanup of async clients +like Weaviate and OpenAI to prevent event loop conflicts during Gradio's +hot-reload process. +""" + +from openai import AsyncOpenAI +from weaviate.client import WeaviateAsyncClient + +from .env_vars import Configs +from .tools.kb_weaviate import AsyncWeaviateKnowledgeBase, get_weaviate_async_client + + +class AsyncClientManager: + """Manages async client lifecycle with lazy initialization and cleanup. + + This class ensures clients are created only once and properly closed, + preventing ResourceWarning errors from unclosed event loops. + + Parameters + ---------- + configs: Configs | None, optional, default=None + Configuration object for client setup. If None, a new ``Configs()`` is created. + + Examples + -------- + >>> manager = AsyncClientManager() + >>> # Access clients (created on first access) + >>> weaviate = manager.weaviate_client + >>> kb = manager.knowledgebase + >>> openai = manager.openai_client + >>> # In finally block or cleanup + >>> await manager.close() + """ + + def __init__(self, configs: Configs | None = None) -> None: + """Initialize manager with optional configs.""" + self._configs = configs + self._weaviate_client = None + self._openai_client = None + self._knowledgebase = None + self._initialized = False + + @property + def configs(self) -> Configs: + """Get or create configs instance.""" + if self._configs is None: + self._configs = Configs() # pyright: ignore[reportCallIssue] + return self._configs + + @property + def openai_client(self) -> AsyncOpenAI: + """Get or create OpenAI client.""" + if self._openai_client is None: + self._openai_client = AsyncOpenAI() + self._initialized = True + return self._openai_client + + @property + def weaviate_client(self) -> WeaviateAsyncClient: + """Get or create Weaviate client.""" + if self._weaviate_client is None: + self._weaviate_client = get_weaviate_async_client(self.configs) + self._initialized = True + return self._weaviate_client + + @property + def knowledgebase(self) -> AsyncWeaviateKnowledgeBase: + """Get or create knowledge base instance.""" + if self._knowledgebase is None: + self._knowledgebase = AsyncWeaviateKnowledgeBase( + self.weaviate_client, + collection_name=self.configs.weaviate_collection_name, + ) + self._initialized = True + return self._knowledgebase + + async def close(self) -> None: + """Close all initialized async clients.""" + if self._weaviate_client is not None: + await self._weaviate_client.close() + self._weaviate_client = None + + if self._openai_client is not None: + await self._openai_client.close() + self._openai_client = None + + self._knowledgebase = None + self._initialized = False + + def is_initialized(self) -> bool: + """Check if any clients have been initialized.""" + return self._initialized diff --git a/src/utils/gradio/__init__.py b/src/utils/gradio/__init__.py index e69de29..f89efb6 100644 --- a/src/utils/gradio/__init__.py +++ b/src/utils/gradio/__init__.py @@ -0,0 +1,10 @@ +import gradio as gr + + +COMMON_GRADIO_CONFIG = { + "chatbot": gr.Chatbot(height=600), + "textbox": gr.Textbox(lines=1, placeholder="Enter your prompt"), + # Additional input to maintain session state across multiple turns + # NOTE: Examples must be a list of lists when additional inputs are provided + "additional_inputs": gr.State(value={}, render=False), +} diff --git a/src/utils/gradio/messages.py b/src/utils/gradio/messages.py index 6200301..932929f 100644 --- a/src/utils/gradio/messages.py +++ 
b/src/utils/gradio/messages.py @@ -4,15 +4,10 @@ from agents import StreamEvent, stream_events from agents.items import MessageOutputItem, RunItem, ToolCallItem, ToolCallOutputItem -from gradio.components.chatbot import ChatMessage -from openai.types.responses import ( - ResponseCompletedEvent, - ResponseFunctionToolCall, - ResponseOutputMessage, - ResponseOutputText, -) - -from ..pretty_printing import pretty_print +from gradio.components.chatbot import ChatMessage, MetadataDict +from openai.types.responses import ResponseFunctionToolCall, ResponseOutputText +from openai.types.responses.response_completed_event import ResponseCompletedEvent +from openai.types.responses.response_output_message import ResponseOutputMessage if TYPE_CHECKING: @@ -36,14 +31,13 @@ def gradio_messages_to_oai_chat( return output -def _oai_response_output_item_to_gradio(item: RunItem) -> list[ChatMessage] | None: +def _oai_response_output_item_to_gradio( + item: RunItem, is_final_output: bool +) -> list[ChatMessage] | None: """Map OAI SDK new RunItem (response.new_items) to gr messages. Returns None if message is of unknown/unsupported type. """ - print(type(item)) - pretty_print(item) - if isinstance(item, ToolCallItem): raw_item = item.raw_item @@ -51,9 +45,9 @@ def _oai_response_output_item_to_gradio(item: RunItem) -> list[ChatMessage] | No return [ ChatMessage( role="assistant", - content=f"```\n{raw_item.arguments}\n```\n`{raw_item.call_id}`", + content=f"```\n{raw_item.arguments}\n```", metadata={ - "title": f"Used tool `{raw_item.name}`", + "title": f"🛠️ Used tool `{raw_item.name}`", }, ) ] @@ -68,7 +62,8 @@ def _oai_response_output_item_to_gradio(item: RunItem) -> list[ChatMessage] | No role="assistant", content=f"> {function_output}\n\n`{call_id}`", metadata={ - "title": "Tool response", + "title": "*Tool call output*", + "status": "done", # This makes it collapsed by default }, ) ] @@ -81,13 +76,25 @@ def _oai_response_output_item_to_gradio(item: RunItem) -> list[ChatMessage] | No if isinstance(response_text, ResponseOutputText): output_texts.append(response_text.text) - return [ChatMessage(role="assistant", content=_text) for _text in output_texts] + return [ + ChatMessage( + role="assistant", + content=_text, + metadata={ + "title": "Intermediate Step", + "status": "done", # This makes it collapsed by default + } + if not is_final_output + else MetadataDict(), + ) + for _text in output_texts + ] return None def oai_agent_items_to_gradio_messages( - new_items: list[RunItem], + new_items: list[RunItem], is_final_output: bool = True ) -> list[ChatMessage]: """Parse agent sdk "new items" into a list of gr messages. @@ -95,7 +102,7 @@ def oai_agent_items_to_gradio_messages( """ output: list[ChatMessage] = [] for item in new_items: - maybe_messages = _oai_response_output_item_to_gradio(item) + maybe_messages = _oai_response_output_item_to_gradio(item, is_final_output) if maybe_messages is not None: output.extend(maybe_messages) @@ -110,30 +117,49 @@ def oai_agent_stream_to_gradio_messages( Adds extra data for tool use to make the gradio display informative. """ output: list[ChatMessage] = [] + if isinstance(stream_event, stream_events.RawResponsesStreamEvent): data = stream_event.data if isinstance(data, ResponseCompletedEvent): + # The completed event may contain multiple output messages, + # including tool calls and final outputs. + # If there is at least one tool call, we mark the response as a thought. 
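The heuristic the next added lines implement can be restated as a tiny stand-alone predicate; the helper name `_looks_like_thought` is ours, not part of the patch.

```python
# Stand-alone restatement of the heuristic below: a completed response
# whose output holds more than one item, at least one of which is a tool
# call, is an intermediate "thought" rather than a final answer.
from openai.types.responses import ResponseFunctionToolCall


def _looks_like_thought(output: list) -> bool:
    return len(output) > 1 and any(
        isinstance(item, ResponseFunctionToolCall) for item in output
    )
```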
+ is_thought = len(data.response.output) > 1 and any( + isinstance(message, ResponseFunctionToolCall) + for message in data.response.output + ) + for message in data.response.output: if isinstance(message, ResponseOutputMessage): for _item in message.content: if isinstance(_item, ResponseOutputText): output.append( - ChatMessage(role="assistant", content=_item.text) + ChatMessage( + role="assistant", + content=_item.text, + metadata={ + "title": "🧠 Thought", + "id": data.sequence_number, + } + if is_thought + else MetadataDict(), + ) ) - elif isinstance(message, ResponseFunctionToolCall): output.append( ChatMessage( role="assistant", content=f"```\n{message.arguments}\n```", metadata={ - "title": f"Used tool `{message.name}`", + "title": f"🛠️ Used tool `{message.name}`", }, ) ) + elif isinstance(stream_event, stream_events.RunItemStreamEvent): name = stream_event.name item = stream_event.item + if name == "tool_output" and isinstance(item, ToolCallOutputItem): output.append( ChatMessage( @@ -141,6 +167,7 @@ def oai_agent_stream_to_gradio_messages( content=f"```\n{item.output}\n```", metadata={ "title": "*Tool call output*", + "status": "done", # This makes it collapsed by default }, ) ) diff --git a/src/utils/langfuse/oai_sdk_setup.py b/src/utils/langfuse/oai_sdk_setup.py index 8432cc3..7458206 100644 --- a/src/utils/langfuse/oai_sdk_setup.py +++ b/src/utils/langfuse/oai_sdk_setup.py @@ -5,7 +5,6 @@ """ import logfire -import nest_asyncio from opentelemetry import trace from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter from opentelemetry.sdk.trace import TracerProvider @@ -16,7 +15,6 @@ def configure_oai_agents_sdk(service_name: str) -> None: """Register Langfuse as tracing provider for OAI Agents SDK.""" - nest_asyncio.apply() logfire.configure(service_name=service_name, send_to_logfire=False, scrubbing=False) logfire.instrument_openai_agents() diff --git a/src/utils/logging.py b/src/utils/logging.py index 492d9e7..a0264e2 100644 --- a/src/utils/logging.py +++ b/src/utils/logging.py @@ -21,7 +21,7 @@ def filter(self, record: logging.LogRecord) -> bool: ) -def set_up_logging(): +def set_up_logging() -> None: """Set up Logging and Warning levels.""" root_logger = logging.getLogger() filter_ = IgnoreOpenAI401Filter() diff --git a/src/utils/tools/kb_weaviate.py b/src/utils/tools/kb_weaviate.py index c7d242b..d8c45e3 100644 --- a/src/utils/tools/kb_weaviate.py +++ b/src/utils/tools/kb_weaviate.py @@ -9,9 +9,9 @@ import pydantic import weaviate from weaviate import WeaviateAsyncClient -from weaviate.config import AdditionalConfig from ..async_utils import rate_limited +from ..env_vars import Configs class _Source(pydantic.BaseModel): @@ -141,18 +141,7 @@ def _vectorize(self, text: str) -> list[float]: return response.data[0].embedding -def get_weaviate_async_client( - http_host: str | None = None, - http_port: int | None = None, - http_secure: bool = False, - grpc_host: str | None = None, - grpc_port: int | None = None, - grpc_secure: bool = False, - api_key: str | None = None, - headers: dict[str, str] | None = None, - additional_config: AdditionalConfig | None = None, - skip_init_checks: bool = False, -) -> WeaviateAsyncClient: +def get_weaviate_async_client(configs: Configs) -> WeaviateAsyncClient: """Get an async Weaviate client. If no parameters are provided, the function will attempt to connect to a local @@ -199,16 +188,11 @@ def get_weaviate_async_client( An asynchronous Weaviate client configured with the provided parameters. 
""" return weaviate.use_async_with_custom( - http_host=http_host or os.getenv("WEAVIATE_HTTP_HOST", "localhost"), - http_port=http_port or int(os.getenv("WEAVIATE_HTTP_PORT", "8080")), - http_secure=http_secure - or os.getenv("WEAVIATE_HTTP_SECURE", "false").lower() == "true", - grpc_host=grpc_host or os.getenv("WEAVIATE_GRPC_HOST", "localhost"), - grpc_port=grpc_port or int(os.getenv("WEAVIATE_GRPC_PORT", "50051")), - grpc_secure=grpc_secure - or os.getenv("WEAVIATE_GRPC_SECURE", "false").lower() == "true", - auth_credentials=api_key or os.getenv("WEAVIATE_API_KEY"), - headers=headers, - additional_config=additional_config, - skip_init_checks=skip_init_checks, + http_host=configs.weaviate_http_host or "localhost", + http_port=configs.weaviate_http_port or 8080, + http_secure=configs.weaviate_http_secure or False, + grpc_host=configs.weaviate_grpc_host or "localhost", + grpc_port=configs.weaviate_grpc_port or 50051, + grpc_secure=configs.weaviate_grpc_secure or False, + auth_credentials=configs.weaviate_api_key or None, ) diff --git a/src/utils/tools/news_events.py b/src/utils/tools/news_events.py index 1138618..06a5a94 100644 --- a/src/utils/tools/news_events.py +++ b/src/utils/tools/news_events.py @@ -55,7 +55,14 @@ async def _fetch_current_events_html() -> str: "prop": "text", "format": "json", } - client = httpx.AsyncClient() + client = httpx.AsyncClient( + headers={ + "User-Agent": ( + "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36" + " (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36" + ) + } + ) with Progress( SpinnerColumn(), diff --git a/tests/tool_tests/test_integration.py b/tests/tool_tests/test_integration.py index b6a453a..847c8f3 100644 --- a/tests/tool_tests/test_integration.py +++ b/tests/tool_tests/test_integration.py @@ -33,15 +33,7 @@ async def weaviate_kb( configs: Configs, ) -> AsyncGenerator[AsyncWeaviateKnowledgeBase, None]: """Weaviate knowledgebase for testing.""" - async_client = get_weaviate_async_client( - http_host=configs.weaviate_http_host, - http_port=configs.weaviate_http_port, - http_secure=configs.weaviate_http_secure, - grpc_host=configs.weaviate_grpc_host, - grpc_port=configs.weaviate_grpc_port, - grpc_secure=configs.weaviate_grpc_secure, - api_key=configs.weaviate_api_key, - ) + async_client = get_weaviate_async_client(configs) yield AsyncWeaviateKnowledgeBase( async_client=async_client, collection_name=configs.weaviate_collection_name diff --git a/tests/tool_tests/test_weaviate.py b/tests/tool_tests/test_weaviate.py index 4a232ed..474a93b 100644 --- a/tests/tool_tests/test_weaviate.py +++ b/tests/tool_tests/test_weaviate.py @@ -24,15 +24,7 @@ def configs(): @pytest_asyncio.fixture() async def weaviate_kb(configs): """Weaviate knowledgebase for testing.""" - async_client = get_weaviate_async_client( - http_host=configs.weaviate_http_host, - http_port=configs.weaviate_http_port, - http_secure=configs.weaviate_http_secure, - grpc_host=configs.weaviate_grpc_host, - grpc_port=configs.weaviate_grpc_port, - grpc_secure=configs.weaviate_grpc_secure, - api_key=configs.weaviate_api_key, - ) + async_client = get_weaviate_async_client(configs) yield AsyncWeaviateKnowledgeBase( async_client=async_client, collection_name=configs.weaviate_collection_name diff --git a/uv.lock b/uv.lock index 8b3b61a..0f3b160 100644 --- a/uv.lock +++ b/uv.lock @@ -26,7 +26,6 @@ dependencies = [ { name = "plotly" }, { name = "pydantic" }, { name = "pydantic-ai-slim", extra = ["logfire"] }, - { name = "pytest-asyncio" }, { name = "scikit-learn" }, { name = 
"weaviate-client" }, ] @@ -70,19 +69,18 @@ web-search = [ requires-dist = [ { name = "aiohttp", specifier = ">=3.12.14" }, { name = "beautifulsoup4", specifier = ">=4.13.4" }, - { name = "datasets", specifier = ">=3.6.0" }, - { name = "e2b-code-interpreter", specifier = ">=1.5.2" }, - { name = "gradio", specifier = ">=5.37.0" }, - { name = "langfuse", specifier = ">=3.1.3" }, + { name = "datasets", specifier = ">=4.4.0" }, + { name = "e2b-code-interpreter", specifier = ">=2.3.0" }, + { name = "gradio", specifier = ">=6.1.0" }, + { name = "langfuse", specifier = ">=3.9.0" }, { name = "lxml", specifier = ">=6.0.0" }, { name = "nest-asyncio", specifier = ">=1.6.0" }, { name = "numpy", specifier = "<2.3.0" }, - { name = "openai", specifier = ">=1.93.1" }, - { name = "openai-agents", specifier = ">=0.1.0" }, + { name = "openai", specifier = ">=2.6.0" }, + { name = "openai-agents", specifier = ">=0.4.0" }, { name = "plotly", specifier = ">=6.2.0" }, { name = "pydantic", specifier = ">=2.11.7" }, { name = "pydantic-ai-slim", extras = ["logfire"], specifier = ">=0.3.7" }, - { name = "pytest-asyncio", specifier = ">=0.25.2" }, { name = "scikit-learn", specifier = ">=1.7.0" }, { name = "weaviate-client", specifier = ">=4.15.4" }, ] @@ -100,8 +98,8 @@ dev = [ { name = "pip-audit", specifier = ">=2.7.3" }, { name = "pre-commit", specifier = ">=4.1.0" }, { name = "pytest", specifier = ">=8.3.4" }, - { name = "pytest-asyncio", specifier = ">=0.25.2" }, - { name = "pytest-cov", specifier = ">=6.0.0" }, + { name = "pytest-asyncio", specifier = ">=1.2.0" }, + { name = "pytest-cov", specifier = ">=7.0.0" }, { name = "pytest-mock", specifier = ">=3.14.0" }, { name = "ruff", specifier = ">=0.12.2" }, { name = "transformers", specifier = ">=4.54.1" }, @@ -163,7 +161,7 @@ wheels = [ [[package]] name = "aiohttp" -version = "3.13.2" +version = "3.13.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohappyeyeballs" }, @@ -174,76 +172,76 @@ dependencies = [ { name = "propcache" }, { name = "yarl" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/1c/ce/3b83ebba6b3207a7135e5fcaba49706f8a4b6008153b4e30540c982fae26/aiohttp-3.13.2.tar.gz", hash = "sha256:40176a52c186aefef6eb3cad2cdd30cd06e3afbe88fe8ab2af9c0b90f228daca", size = 7837994, upload-time = "2025-10-28T20:59:39.937Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/29/9b/01f00e9856d0a73260e86dd8ed0c2234a466c5c1712ce1c281548df39777/aiohttp-3.13.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:b1e56bab2e12b2b9ed300218c351ee2a3d8c8fdab5b1ec6193e11a817767e47b", size = 737623, upload-time = "2025-10-28T20:56:30.797Z" }, - { url = "https://files.pythonhosted.org/packages/5a/1b/4be39c445e2b2bd0aab4ba736deb649fabf14f6757f405f0c9685019b9e9/aiohttp-3.13.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:364e25edaabd3d37b1db1f0cbcee8c73c9a3727bfa262b83e5e4cf3489a2a9dc", size = 492664, upload-time = "2025-10-28T20:56:32.708Z" }, - { url = "https://files.pythonhosted.org/packages/28/66/d35dcfea8050e131cdd731dff36434390479b4045a8d0b9d7111b0a968f1/aiohttp-3.13.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c5c94825f744694c4b8db20b71dba9a257cd2ba8e010a803042123f3a25d50d7", size = 491808, upload-time = "2025-10-28T20:56:34.57Z" }, - { url = "https://files.pythonhosted.org/packages/00/29/8e4609b93e10a853b65f8291e64985de66d4f5848c5637cddc70e98f01f8/aiohttp-3.13.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:ba2715d842ffa787be87cbfce150d5e88c87a98e0b62e0f5aa489169a393dbbb", size = 1738863, upload-time = "2025-10-28T20:56:36.377Z" }, - { url = "https://files.pythonhosted.org/packages/9d/fa/4ebdf4adcc0def75ced1a0d2d227577cd7b1b85beb7edad85fcc87693c75/aiohttp-3.13.2-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:585542825c4bc662221fb257889e011a5aa00f1ae4d75d1d246a5225289183e3", size = 1700586, upload-time = "2025-10-28T20:56:38.034Z" }, - { url = "https://files.pythonhosted.org/packages/da/04/73f5f02ff348a3558763ff6abe99c223381b0bace05cd4530a0258e52597/aiohttp-3.13.2-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:39d02cb6025fe1aabca329c5632f48c9532a3dabccd859e7e2f110668972331f", size = 1768625, upload-time = "2025-10-28T20:56:39.75Z" }, - { url = "https://files.pythonhosted.org/packages/f8/49/a825b79ffec124317265ca7d2344a86bcffeb960743487cb11988ffb3494/aiohttp-3.13.2-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e67446b19e014d37342f7195f592a2a948141d15a312fe0e700c2fd2f03124f6", size = 1867281, upload-time = "2025-10-28T20:56:41.471Z" }, - { url = "https://files.pythonhosted.org/packages/b9/48/adf56e05f81eac31edcfae45c90928f4ad50ef2e3ea72cb8376162a368f8/aiohttp-3.13.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4356474ad6333e41ccefd39eae869ba15a6c5299c9c01dfdcfdd5c107be4363e", size = 1752431, upload-time = "2025-10-28T20:56:43.162Z" }, - { url = "https://files.pythonhosted.org/packages/30/ab/593855356eead019a74e862f21523db09c27f12fd24af72dbc3555b9bfd9/aiohttp-3.13.2-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:eeacf451c99b4525f700f078becff32c32ec327b10dcf31306a8a52d78166de7", size = 1562846, upload-time = "2025-10-28T20:56:44.85Z" }, - { url = "https://files.pythonhosted.org/packages/39/0f/9f3d32271aa8dc35036e9668e31870a9d3b9542dd6b3e2c8a30931cb27ae/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d8a9b889aeabd7a4e9af0b7f4ab5ad94d42e7ff679aaec6d0db21e3b639ad58d", size = 1699606, upload-time = "2025-10-28T20:56:46.519Z" }, - { url = "https://files.pythonhosted.org/packages/2c/3c/52d2658c5699b6ef7692a3f7128b2d2d4d9775f2a68093f74bca06cf01e1/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:fa89cb11bc71a63b69568d5b8a25c3ca25b6d54c15f907ca1c130d72f320b76b", size = 1720663, upload-time = "2025-10-28T20:56:48.528Z" }, - { url = "https://files.pythonhosted.org/packages/9b/d4/8f8f3ff1fb7fb9e3f04fcad4e89d8a1cd8fc7d05de67e3de5b15b33008ff/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8aa7c807df234f693fed0ecd507192fc97692e61fee5702cdc11155d2e5cadc8", size = 1737939, upload-time = "2025-10-28T20:56:50.77Z" }, - { url = "https://files.pythonhosted.org/packages/03/d3/ddd348f8a27a634daae39a1b8e291ff19c77867af438af844bf8b7e3231b/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:9eb3e33fdbe43f88c3c75fa608c25e7c47bbd80f48d012763cb67c47f39a7e16", size = 1555132, upload-time = "2025-10-28T20:56:52.568Z" }, - { url = "https://files.pythonhosted.org/packages/39/b8/46790692dc46218406f94374903ba47552f2f9f90dad554eed61bfb7b64c/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9434bc0d80076138ea986833156c5a48c9c7a8abb0c96039ddbb4afc93184169", size = 1764802, upload-time = "2025-10-28T20:56:54.292Z" }, - { url = 
"https://files.pythonhosted.org/packages/ba/e4/19ce547b58ab2a385e5f0b8aa3db38674785085abcf79b6e0edd1632b12f/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ff15c147b2ad66da1f2cbb0622313f2242d8e6e8f9b79b5206c84523a4473248", size = 1719512, upload-time = "2025-10-28T20:56:56.428Z" }, - { url = "https://files.pythonhosted.org/packages/70/30/6355a737fed29dcb6dfdd48682d5790cb5eab050f7b4e01f49b121d3acad/aiohttp-3.13.2-cp312-cp312-win32.whl", hash = "sha256:27e569eb9d9e95dbd55c0fc3ec3a9335defbf1d8bc1d20171a49f3c4c607b93e", size = 426690, upload-time = "2025-10-28T20:56:58.736Z" }, - { url = "https://files.pythonhosted.org/packages/0a/0d/b10ac09069973d112de6ef980c1f6bb31cb7dcd0bc363acbdad58f927873/aiohttp-3.13.2-cp312-cp312-win_amd64.whl", hash = "sha256:8709a0f05d59a71f33fd05c17fc11fcb8c30140506e13c2f5e8ee1b8964e1b45", size = 453465, upload-time = "2025-10-28T20:57:00.795Z" }, - { url = "https://files.pythonhosted.org/packages/bf/78/7e90ca79e5aa39f9694dcfd74f4720782d3c6828113bb1f3197f7e7c4a56/aiohttp-3.13.2-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:7519bdc7dfc1940d201651b52bf5e03f5503bda45ad6eacf64dda98be5b2b6be", size = 732139, upload-time = "2025-10-28T20:57:02.455Z" }, - { url = "https://files.pythonhosted.org/packages/db/ed/1f59215ab6853fbaa5c8495fa6cbc39edfc93553426152b75d82a5f32b76/aiohttp-3.13.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:088912a78b4d4f547a1f19c099d5a506df17eacec3c6f4375e2831ec1d995742", size = 490082, upload-time = "2025-10-28T20:57:04.784Z" }, - { url = "https://files.pythonhosted.org/packages/68/7b/fe0fe0f5e05e13629d893c760465173a15ad0039c0a5b0d0040995c8075e/aiohttp-3.13.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5276807b9de9092af38ed23ce120539ab0ac955547b38563a9ba4f5b07b95293", size = 489035, upload-time = "2025-10-28T20:57:06.894Z" }, - { url = "https://files.pythonhosted.org/packages/d2/04/db5279e38471b7ac801d7d36a57d1230feeee130bbe2a74f72731b23c2b1/aiohttp-3.13.2-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1237c1375eaef0db4dcd7c2559f42e8af7b87ea7d295b118c60c36a6e61cb811", size = 1720387, upload-time = "2025-10-28T20:57:08.685Z" }, - { url = "https://files.pythonhosted.org/packages/31/07/8ea4326bd7dae2bd59828f69d7fdc6e04523caa55e4a70f4a8725a7e4ed2/aiohttp-3.13.2-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:96581619c57419c3d7d78703d5b78c1e5e5fc0172d60f555bdebaced82ded19a", size = 1688314, upload-time = "2025-10-28T20:57:10.693Z" }, - { url = "https://files.pythonhosted.org/packages/48/ab/3d98007b5b87ffd519d065225438cc3b668b2f245572a8cb53da5dd2b1bc/aiohttp-3.13.2-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a2713a95b47374169409d18103366de1050fe0ea73db358fc7a7acb2880422d4", size = 1756317, upload-time = "2025-10-28T20:57:12.563Z" }, - { url = "https://files.pythonhosted.org/packages/97/3d/801ca172b3d857fafb7b50c7c03f91b72b867a13abca982ed6b3081774ef/aiohttp-3.13.2-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:228a1cd556b3caca590e9511a89444925da87d35219a49ab5da0c36d2d943a6a", size = 1858539, upload-time = "2025-10-28T20:57:14.623Z" }, - { url = "https://files.pythonhosted.org/packages/f7/0d/4764669bdf47bd472899b3d3db91fffbe925c8e3038ec591a2fd2ad6a14d/aiohttp-3.13.2-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:ac6cde5fba8d7d8c6ac963dbb0256a9854e9fafff52fbcc58fdf819357892c3e", size = 1739597, upload-time = "2025-10-28T20:57:16.399Z" }, - { url = "https://files.pythonhosted.org/packages/c4/52/7bd3c6693da58ba16e657eb904a5b6decfc48ecd06e9ac098591653b1566/aiohttp-3.13.2-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f2bef8237544f4e42878c61cef4e2839fee6346dc60f5739f876a9c50be7fcdb", size = 1555006, upload-time = "2025-10-28T20:57:18.288Z" }, - { url = "https://files.pythonhosted.org/packages/48/30/9586667acec5993b6f41d2ebcf96e97a1255a85f62f3c653110a5de4d346/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:16f15a4eac3bc2d76c45f7ebdd48a65d41b242eb6c31c2245463b40b34584ded", size = 1683220, upload-time = "2025-10-28T20:57:20.241Z" }, - { url = "https://files.pythonhosted.org/packages/71/01/3afe4c96854cfd7b30d78333852e8e851dceaec1c40fd00fec90c6402dd2/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:bb7fb776645af5cc58ab804c58d7eba545a97e047254a52ce89c157b5af6cd0b", size = 1712570, upload-time = "2025-10-28T20:57:22.253Z" }, - { url = "https://files.pythonhosted.org/packages/11/2c/22799d8e720f4697a9e66fd9c02479e40a49de3de2f0bbe7f9f78a987808/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:e1b4951125ec10c70802f2cb09736c895861cd39fd9dcb35107b4dc8ae6220b8", size = 1733407, upload-time = "2025-10-28T20:57:24.37Z" }, - { url = "https://files.pythonhosted.org/packages/34/cb/90f15dd029f07cebbd91f8238a8b363978b530cd128488085b5703683594/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:550bf765101ae721ee1d37d8095f47b1f220650f85fe1af37a90ce75bab89d04", size = 1550093, upload-time = "2025-10-28T20:57:26.257Z" }, - { url = "https://files.pythonhosted.org/packages/69/46/12dce9be9d3303ecbf4d30ad45a7683dc63d90733c2d9fe512be6716cd40/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:fe91b87fc295973096251e2d25a811388e7d8adf3bd2b97ef6ae78bc4ac6c476", size = 1758084, upload-time = "2025-10-28T20:57:28.349Z" }, - { url = "https://files.pythonhosted.org/packages/f9/c8/0932b558da0c302ffd639fc6362a313b98fdf235dc417bc2493da8394df7/aiohttp-3.13.2-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e0c8e31cfcc4592cb200160344b2fb6ae0f9e4effe06c644b5a125d4ae5ebe23", size = 1716987, upload-time = "2025-10-28T20:57:30.233Z" }, - { url = "https://files.pythonhosted.org/packages/5d/8b/f5bd1a75003daed099baec373aed678f2e9b34f2ad40d85baa1368556396/aiohttp-3.13.2-cp313-cp313-win32.whl", hash = "sha256:0740f31a60848d6edb296a0df827473eede90c689b8f9f2a4cdde74889eb2254", size = 425859, upload-time = "2025-10-28T20:57:32.105Z" }, - { url = "https://files.pythonhosted.org/packages/5d/28/a8a9fc6957b2cee8902414e41816b5ab5536ecf43c3b1843c10e82c559b2/aiohttp-3.13.2-cp313-cp313-win_amd64.whl", hash = "sha256:a88d13e7ca367394908f8a276b89d04a3652044612b9a408a0bb22a5ed976a1a", size = 452192, upload-time = "2025-10-28T20:57:34.166Z" }, - { url = "https://files.pythonhosted.org/packages/9b/36/e2abae1bd815f01c957cbf7be817b3043304e1c87bad526292a0410fdcf9/aiohttp-3.13.2-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:2475391c29230e063ef53a66669b7b691c9bfc3f1426a0f7bcdf1216bdbac38b", size = 735234, upload-time = "2025-10-28T20:57:36.415Z" }, - { url = "https://files.pythonhosted.org/packages/ca/e3/1ee62dde9b335e4ed41db6bba02613295a0d5b41f74a783c142745a12763/aiohttp-3.13.2-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:f33c8748abef4d8717bb20e8fb1b3e07c6adacb7fd6beaae971a764cf5f30d61", size = 490733, upload-time 
= "2025-10-28T20:57:38.205Z" }, - { url = "https://files.pythonhosted.org/packages/1a/aa/7a451b1d6a04e8d15a362af3e9b897de71d86feac3babf8894545d08d537/aiohttp-3.13.2-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:ae32f24bbfb7dbb485a24b30b1149e2f200be94777232aeadba3eecece4d0aa4", size = 491303, upload-time = "2025-10-28T20:57:40.122Z" }, - { url = "https://files.pythonhosted.org/packages/57/1e/209958dbb9b01174870f6a7538cd1f3f28274fdbc88a750c238e2c456295/aiohttp-3.13.2-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5d7f02042c1f009ffb70067326ef183a047425bb2ff3bc434ead4dd4a4a66a2b", size = 1717965, upload-time = "2025-10-28T20:57:42.28Z" }, - { url = "https://files.pythonhosted.org/packages/08/aa/6a01848d6432f241416bc4866cae8dc03f05a5a884d2311280f6a09c73d6/aiohttp-3.13.2-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:93655083005d71cd6c072cdab54c886e6570ad2c4592139c3fb967bfc19e4694", size = 1667221, upload-time = "2025-10-28T20:57:44.869Z" }, - { url = "https://files.pythonhosted.org/packages/87/4f/36c1992432d31bbc789fa0b93c768d2e9047ec8c7177e5cd84ea85155f36/aiohttp-3.13.2-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:0db1e24b852f5f664cd728db140cf11ea0e82450471232a394b3d1a540b0f906", size = 1757178, upload-time = "2025-10-28T20:57:47.216Z" }, - { url = "https://files.pythonhosted.org/packages/ac/b4/8e940dfb03b7e0f68a82b88fd182b9be0a65cb3f35612fe38c038c3112cf/aiohttp-3.13.2-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b009194665bcd128e23eaddef362e745601afa4641930848af4c8559e88f18f9", size = 1838001, upload-time = "2025-10-28T20:57:49.337Z" }, - { url = "https://files.pythonhosted.org/packages/d7/ef/39f3448795499c440ab66084a9db7d20ca7662e94305f175a80f5b7e0072/aiohttp-3.13.2-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c038a8fdc8103cd51dbd986ecdce141473ffd9775a7a8057a6ed9c3653478011", size = 1716325, upload-time = "2025-10-28T20:57:51.327Z" }, - { url = "https://files.pythonhosted.org/packages/d7/51/b311500ffc860b181c05d91c59a1313bdd05c82960fdd4035a15740d431e/aiohttp-3.13.2-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:66bac29b95a00db411cd758fea0e4b9bdba6d549dfe333f9a945430f5f2cc5a6", size = 1547978, upload-time = "2025-10-28T20:57:53.554Z" }, - { url = "https://files.pythonhosted.org/packages/31/64/b9d733296ef79815226dab8c586ff9e3df41c6aff2e16c06697b2d2e6775/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:4ebf9cfc9ba24a74cf0718f04aac2a3bbe745902cc7c5ebc55c0f3b5777ef213", size = 1682042, upload-time = "2025-10-28T20:57:55.617Z" }, - { url = "https://files.pythonhosted.org/packages/3f/30/43d3e0f9d6473a6db7d472104c4eff4417b1e9df01774cb930338806d36b/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:a4b88ebe35ce54205c7074f7302bd08a4cb83256a3e0870c72d6f68a3aaf8e49", size = 1680085, upload-time = "2025-10-28T20:57:57.59Z" }, - { url = "https://files.pythonhosted.org/packages/16/51/c709f352c911b1864cfd1087577760ced64b3e5bee2aa88b8c0c8e2e4972/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:98c4fb90bb82b70a4ed79ca35f656f4281885be076f3f970ce315402b53099ae", size = 1728238, upload-time = "2025-10-28T20:57:59.525Z" }, - { url = 
"https://files.pythonhosted.org/packages/19/e2/19bd4c547092b773caeb48ff5ae4b1ae86756a0ee76c16727fcfd281404b/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:ec7534e63ae0f3759df3a1ed4fa6bc8f75082a924b590619c0dd2f76d7043caa", size = 1544395, upload-time = "2025-10-28T20:58:01.914Z" }, - { url = "https://files.pythonhosted.org/packages/cf/87/860f2803b27dfc5ed7be532832a3498e4919da61299b4a1f8eb89b8ff44d/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:5b927cf9b935a13e33644cbed6c8c4b2d0f25b713d838743f8fe7191b33829c4", size = 1742965, upload-time = "2025-10-28T20:58:03.972Z" }, - { url = "https://files.pythonhosted.org/packages/67/7f/db2fc7618925e8c7a601094d5cbe539f732df4fb570740be88ed9e40e99a/aiohttp-3.13.2-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:88d6c017966a78c5265d996c19cdb79235be5e6412268d7e2ce7dee339471b7a", size = 1697585, upload-time = "2025-10-28T20:58:06.189Z" }, - { url = "https://files.pythonhosted.org/packages/0c/07/9127916cb09bb38284db5036036042b7b2c514c8ebaeee79da550c43a6d6/aiohttp-3.13.2-cp314-cp314-win32.whl", hash = "sha256:f7c183e786e299b5d6c49fb43a769f8eb8e04a2726a2bd5887b98b5cc2d67940", size = 431621, upload-time = "2025-10-28T20:58:08.636Z" }, - { url = "https://files.pythonhosted.org/packages/fb/41/554a8a380df6d3a2bba8a7726429a23f4ac62aaf38de43bb6d6cde7b4d4d/aiohttp-3.13.2-cp314-cp314-win_amd64.whl", hash = "sha256:fe242cd381e0fb65758faf5ad96c2e460df6ee5b2de1072fe97e4127927e00b4", size = 457627, upload-time = "2025-10-28T20:58:11Z" }, - { url = "https://files.pythonhosted.org/packages/c7/8e/3824ef98c039d3951cb65b9205a96dd2b20f22241ee17d89c5701557c826/aiohttp-3.13.2-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:f10d9c0b0188fe85398c61147bbd2a657d616c876863bfeff43376e0e3134673", size = 767360, upload-time = "2025-10-28T20:58:13.358Z" }, - { url = "https://files.pythonhosted.org/packages/a4/0f/6a03e3fc7595421274fa34122c973bde2d89344f8a881b728fa8c774e4f1/aiohttp-3.13.2-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:e7c952aefdf2460f4ae55c5e9c3e80aa72f706a6317e06020f80e96253b1accd", size = 504616, upload-time = "2025-10-28T20:58:15.339Z" }, - { url = "https://files.pythonhosted.org/packages/c6/aa/ed341b670f1bc8a6f2c6a718353d13b9546e2cef3544f573c6a1ff0da711/aiohttp-3.13.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c20423ce14771d98353d2e25e83591fa75dfa90a3c1848f3d7c68243b4fbded3", size = 509131, upload-time = "2025-10-28T20:58:17.693Z" }, - { url = "https://files.pythonhosted.org/packages/7f/f0/c68dac234189dae5c4bbccc0f96ce0cc16b76632cfc3a08fff180045cfa4/aiohttp-3.13.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e96eb1a34396e9430c19d8338d2ec33015e4a87ef2b4449db94c22412e25ccdf", size = 1864168, upload-time = "2025-10-28T20:58:20.113Z" }, - { url = "https://files.pythonhosted.org/packages/8f/65/75a9a76db8364b5d0e52a0c20eabc5d52297385d9af9c35335b924fafdee/aiohttp-3.13.2-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:23fb0783bc1a33640036465019d3bba069942616a6a2353c6907d7fe1ccdaf4e", size = 1719200, upload-time = "2025-10-28T20:58:22.583Z" }, - { url = "https://files.pythonhosted.org/packages/f5/55/8df2ed78d7f41d232f6bd3ff866b6f617026551aa1d07e2f03458f964575/aiohttp-3.13.2-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2e1a9bea6244a1d05a4e57c295d69e159a5c50d8ef16aa390948ee873478d9a5", size = 1843497, upload-time = "2025-10-28T20:58:24.672Z" }, - 
{ url = "https://files.pythonhosted.org/packages/e9/e0/94d7215e405c5a02ccb6a35c7a3a6cfff242f457a00196496935f700cde5/aiohttp-3.13.2-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:0a3d54e822688b56e9f6b5816fb3de3a3a64660efac64e4c2dc435230ad23bad", size = 1935703, upload-time = "2025-10-28T20:58:26.758Z" }, - { url = "https://files.pythonhosted.org/packages/0b/78/1eeb63c3f9b2d1015a4c02788fb543141aad0a03ae3f7a7b669b2483f8d4/aiohttp-3.13.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7a653d872afe9f33497215745da7a943d1dc15b728a9c8da1c3ac423af35178e", size = 1792738, upload-time = "2025-10-28T20:58:29.787Z" }, - { url = "https://files.pythonhosted.org/packages/41/75/aaf1eea4c188e51538c04cc568040e3082db263a57086ea74a7d38c39e42/aiohttp-3.13.2-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:56d36e80d2003fa3fc0207fac644216d8532e9504a785ef9a8fd013f84a42c61", size = 1624061, upload-time = "2025-10-28T20:58:32.529Z" }, - { url = "https://files.pythonhosted.org/packages/9b/c2/3b6034de81fbcc43de8aeb209073a2286dfb50b86e927b4efd81cf848197/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:78cd586d8331fb8e241c2dd6b2f4061778cc69e150514b39a9e28dd050475661", size = 1789201, upload-time = "2025-10-28T20:58:34.618Z" }, - { url = "https://files.pythonhosted.org/packages/c9/38/c15dcf6d4d890217dae79d7213988f4e5fe6183d43893a9cf2fe9e84ca8d/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:20b10bbfbff766294fe99987f7bb3b74fdd2f1a2905f2562132641ad434dcf98", size = 1776868, upload-time = "2025-10-28T20:58:38.835Z" }, - { url = "https://files.pythonhosted.org/packages/04/75/f74fd178ac81adf4f283a74847807ade5150e48feda6aef024403716c30c/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:9ec49dff7e2b3c85cdeaa412e9d438f0ecd71676fde61ec57027dd392f00c693", size = 1790660, upload-time = "2025-10-28T20:58:41.507Z" }, - { url = "https://files.pythonhosted.org/packages/e7/80/7368bd0d06b16b3aba358c16b919e9c46cf11587dc572091031b0e9e3ef0/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:94f05348c4406450f9d73d38efb41d669ad6cd90c7ee194810d0eefbfa875a7a", size = 1617548, upload-time = "2025-10-28T20:58:43.674Z" }, - { url = "https://files.pythonhosted.org/packages/7d/4b/a6212790c50483cb3212e507378fbe26b5086d73941e1ec4b56a30439688/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:fa4dcb605c6f82a80c7f95713c2b11c3b8e9893b3ebd2bc9bde93165ed6107be", size = 1817240, upload-time = "2025-10-28T20:58:45.787Z" }, - { url = "https://files.pythonhosted.org/packages/ff/f7/ba5f0ba4ea8d8f3c32850912944532b933acbf0f3a75546b89269b9b7dde/aiohttp-3.13.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:cf00e5db968c3f67eccd2778574cf64d8b27d95b237770aa32400bd7a1ca4f6c", size = 1762334, upload-time = "2025-10-28T20:58:47.936Z" }, - { url = "https://files.pythonhosted.org/packages/7e/83/1a5a1856574588b1cad63609ea9ad75b32a8353ac995d830bf5da9357364/aiohttp-3.13.2-cp314-cp314t-win32.whl", hash = "sha256:d23b5fe492b0805a50d3371e8a728a9134d8de5447dce4c885f5587294750734", size = 464685, upload-time = "2025-10-28T20:58:50.642Z" }, - { url = "https://files.pythonhosted.org/packages/9f/4d/d22668674122c08f4d56972297c51a624e64b3ed1efaa40187607a7cb66e/aiohttp-3.13.2-cp314-cp314t-win_amd64.whl", hash = "sha256:ff0a7b0a82a7ab905cbda74006318d1b12e37c797eb1b0d4eb3e316cf47f658f", size = 498093, upload-time = "2025-10-28T20:58:52.782Z" }, +sdist 
= { url = "https://files.pythonhosted.org/packages/50/42/32cf8e7704ceb4481406eb87161349abb46a57fee3f008ba9cb610968646/aiohttp-3.13.3.tar.gz", hash = "sha256:a949eee43d3782f2daae4f4a2819b2cb9b0c5d3b7f7a927067cc84dafdbb9f88", size = 7844556, upload-time = "2026-01-03T17:33:05.204Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a0/be/4fc11f202955a69e0db803a12a062b8379c970c7c84f4882b6da17337cc1/aiohttp-3.13.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:b903a4dfee7d347e2d87697d0713be59e0b87925be030c9178c5faa58ea58d5c", size = 739732, upload-time = "2026-01-03T17:30:14.23Z" }, + { url = "https://files.pythonhosted.org/packages/97/2c/621d5b851f94fa0bb7430d6089b3aa970a9d9b75196bc93bb624b0db237a/aiohttp-3.13.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a45530014d7a1e09f4a55f4f43097ba0fd155089372e105e4bff4ca76cb1b168", size = 494293, upload-time = "2026-01-03T17:30:15.96Z" }, + { url = "https://files.pythonhosted.org/packages/5d/43/4be01406b78e1be8320bb8316dc9c42dbab553d281c40364e0f862d5661c/aiohttp-3.13.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:27234ef6d85c914f9efeb77ff616dbf4ad2380be0cda40b4db086ffc7ddd1b7d", size = 493533, upload-time = "2026-01-03T17:30:17.431Z" }, + { url = "https://files.pythonhosted.org/packages/8d/a8/5a35dc56a06a2c90d4742cbf35294396907027f80eea696637945a106f25/aiohttp-3.13.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d32764c6c9aafb7fb55366a224756387cd50bfa720f32b88e0e6fa45b27dcf29", size = 1737839, upload-time = "2026-01-03T17:30:19.422Z" }, + { url = "https://files.pythonhosted.org/packages/bf/62/4b9eeb331da56530bf2e198a297e5303e1c1ebdceeb00fe9b568a65c5a0c/aiohttp-3.13.3-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:b1a6102b4d3ebc07dad44fbf07b45bb600300f15b552ddf1851b5390202ea2e3", size = 1703932, upload-time = "2026-01-03T17:30:21.756Z" }, + { url = "https://files.pythonhosted.org/packages/7c/f6/af16887b5d419e6a367095994c0b1332d154f647e7dc2bd50e61876e8e3d/aiohttp-3.13.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c014c7ea7fb775dd015b2d3137378b7be0249a448a1612268b5a90c2d81de04d", size = 1771906, upload-time = "2026-01-03T17:30:23.932Z" }, + { url = "https://files.pythonhosted.org/packages/ce/83/397c634b1bcc24292fa1e0c7822800f9f6569e32934bdeef09dae7992dfb/aiohttp-3.13.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2b8d8ddba8f95ba17582226f80e2de99c7a7948e66490ef8d947e272a93e9463", size = 1871020, upload-time = "2026-01-03T17:30:26Z" }, + { url = "https://files.pythonhosted.org/packages/86/f6/a62cbbf13f0ac80a70f71b1672feba90fdb21fd7abd8dbf25c0105fb6fa3/aiohttp-3.13.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9ae8dd55c8e6c4257eae3a20fd2c8f41edaea5992ed67156642493b8daf3cecc", size = 1755181, upload-time = "2026-01-03T17:30:27.554Z" }, + { url = "https://files.pythonhosted.org/packages/0a/87/20a35ad487efdd3fba93d5843efdfaa62d2f1479eaafa7453398a44faf13/aiohttp-3.13.3-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:01ad2529d4b5035578f5081606a465f3b814c542882804e2e8cda61adf5c71bf", size = 1561794, upload-time = "2026-01-03T17:30:29.254Z" }, + { url = "https://files.pythonhosted.org/packages/de/95/8fd69a66682012f6716e1bc09ef8a1a2a91922c5725cb904689f112309c4/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:bb4f7475e359992b580559e008c598091c45b5088f28614e855e42d39c2f1033", size = 1697900, upload-time = "2026-01-03T17:30:31.033Z" }, + { url = "https://files.pythonhosted.org/packages/e5/66/7b94b3b5ba70e955ff597672dad1691333080e37f50280178967aff68657/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:c19b90316ad3b24c69cd78d5c9b4f3aa4497643685901185b65166293d36a00f", size = 1728239, upload-time = "2026-01-03T17:30:32.703Z" }, + { url = "https://files.pythonhosted.org/packages/47/71/6f72f77f9f7d74719692ab65a2a0252584bf8d5f301e2ecb4c0da734530a/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:96d604498a7c782cb15a51c406acaea70d8c027ee6b90c569baa6e7b93073679", size = 1740527, upload-time = "2026-01-03T17:30:34.695Z" }, + { url = "https://files.pythonhosted.org/packages/fa/b4/75ec16cbbd5c01bdaf4a05b19e103e78d7ce1ef7c80867eb0ace42ff4488/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:084911a532763e9d3dd95adf78a78f4096cd5f58cdc18e6fdbc1b58417a45423", size = 1554489, upload-time = "2026-01-03T17:30:36.864Z" }, + { url = "https://files.pythonhosted.org/packages/52/8f/bc518c0eea29f8406dcf7ed1f96c9b48e3bc3995a96159b3fc11f9e08321/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:7a4a94eb787e606d0a09404b9c38c113d3b099d508021faa615d70a0131907ce", size = 1767852, upload-time = "2026-01-03T17:30:39.433Z" }, + { url = "https://files.pythonhosted.org/packages/9d/f2/a07a75173124f31f11ea6f863dc44e6f09afe2bca45dd4e64979490deab1/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:87797e645d9d8e222e04160ee32aa06bc5c163e8499f24db719e7852ec23093a", size = 1722379, upload-time = "2026-01-03T17:30:41.081Z" }, + { url = "https://files.pythonhosted.org/packages/3c/4a/1a3fee7c21350cac78e5c5cef711bac1b94feca07399f3d406972e2d8fcd/aiohttp-3.13.3-cp312-cp312-win32.whl", hash = "sha256:b04be762396457bef43f3597c991e192ee7da460a4953d7e647ee4b1c28e7046", size = 428253, upload-time = "2026-01-03T17:30:42.644Z" }, + { url = "https://files.pythonhosted.org/packages/d9/b7/76175c7cb4eb73d91ad63c34e29fc4f77c9386bba4a65b53ba8e05ee3c39/aiohttp-3.13.3-cp312-cp312-win_amd64.whl", hash = "sha256:e3531d63d3bdfa7e3ac5e9b27b2dd7ec9df3206a98e0b3445fa906f233264c57", size = 455407, upload-time = "2026-01-03T17:30:44.195Z" }, + { url = "https://files.pythonhosted.org/packages/97/8a/12ca489246ca1faaf5432844adbfce7ff2cc4997733e0af120869345643a/aiohttp-3.13.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:5dff64413671b0d3e7d5918ea490bdccb97a4ad29b3f311ed423200b2203e01c", size = 734190, upload-time = "2026-01-03T17:30:45.832Z" }, + { url = "https://files.pythonhosted.org/packages/32/08/de43984c74ed1fca5c014808963cc83cb00d7bb06af228f132d33862ca76/aiohttp-3.13.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:87b9aab6d6ed88235aa2970294f496ff1a1f9adcd724d800e9b952395a80ffd9", size = 491783, upload-time = "2026-01-03T17:30:47.466Z" }, + { url = "https://files.pythonhosted.org/packages/17/f8/8dd2cf6112a5a76f81f81a5130c57ca829d101ad583ce57f889179accdda/aiohttp-3.13.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:425c126c0dc43861e22cb1c14ba4c8e45d09516d0a3ae0a3f7494b79f5f233a3", size = 490704, upload-time = "2026-01-03T17:30:49.373Z" }, + { url = "https://files.pythonhosted.org/packages/6d/40/a46b03ca03936f832bc7eaa47cfbb1ad012ba1be4790122ee4f4f8cba074/aiohttp-3.13.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7f9120f7093c2a32d9647abcaf21e6ad275b4fbec5b55969f978b1a97c7c86bf", size = 
1720652, upload-time = "2026-01-03T17:30:50.974Z" }, + { url = "https://files.pythonhosted.org/packages/f7/7e/917fe18e3607af92657e4285498f500dca797ff8c918bd7d90b05abf6c2a/aiohttp-3.13.3-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:697753042d57f4bf7122cab985bf15d0cef23c770864580f5af4f52023a56bd6", size = 1692014, upload-time = "2026-01-03T17:30:52.729Z" }, + { url = "https://files.pythonhosted.org/packages/71/b6/cefa4cbc00d315d68973b671cf105b21a609c12b82d52e5d0c9ae61d2a09/aiohttp-3.13.3-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:6de499a1a44e7de70735d0b39f67c8f25eb3d91eb3103be99ca0fa882cdd987d", size = 1759777, upload-time = "2026-01-03T17:30:54.537Z" }, + { url = "https://files.pythonhosted.org/packages/fb/e3/e06ee07b45e59e6d81498b591fc589629be1553abb2a82ce33efe2a7b068/aiohttp-3.13.3-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:37239e9f9a7ea9ac5bf6b92b0260b01f8a22281996da609206a84df860bc1261", size = 1861276, upload-time = "2026-01-03T17:30:56.512Z" }, + { url = "https://files.pythonhosted.org/packages/7c/24/75d274228acf35ceeb2850b8ce04de9dd7355ff7a0b49d607ee60c29c518/aiohttp-3.13.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f76c1e3fe7d7c8afad7ed193f89a292e1999608170dcc9751a7462a87dfd5bc0", size = 1743131, upload-time = "2026-01-03T17:30:58.256Z" }, + { url = "https://files.pythonhosted.org/packages/04/98/3d21dde21889b17ca2eea54fdcff21b27b93f45b7bb94ca029c31ab59dc3/aiohttp-3.13.3-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:fc290605db2a917f6e81b0e1e0796469871f5af381ce15c604a3c5c7e51cb730", size = 1556863, upload-time = "2026-01-03T17:31:00.445Z" }, + { url = "https://files.pythonhosted.org/packages/9e/84/da0c3ab1192eaf64782b03971ab4055b475d0db07b17eff925e8c93b3aa5/aiohttp-3.13.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:4021b51936308aeea0367b8f006dc999ca02bc118a0cc78c303f50a2ff6afb91", size = 1682793, upload-time = "2026-01-03T17:31:03.024Z" }, + { url = "https://files.pythonhosted.org/packages/ff/0f/5802ada182f575afa02cbd0ec5180d7e13a402afb7c2c03a9aa5e5d49060/aiohttp-3.13.3-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:49a03727c1bba9a97d3e93c9f93ca03a57300f484b6e935463099841261195d3", size = 1716676, upload-time = "2026-01-03T17:31:04.842Z" }, + { url = "https://files.pythonhosted.org/packages/3f/8c/714d53bd8b5a4560667f7bbbb06b20c2382f9c7847d198370ec6526af39c/aiohttp-3.13.3-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:3d9908a48eb7416dc1f4524e69f1d32e5d90e3981e4e37eb0aa1cd18f9cfa2a4", size = 1733217, upload-time = "2026-01-03T17:31:06.868Z" }, + { url = "https://files.pythonhosted.org/packages/7d/79/e2176f46d2e963facea939f5be2d26368ce543622be6f00a12844d3c991f/aiohttp-3.13.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:2712039939ec963c237286113c68dbad80a82a4281543f3abf766d9d73228998", size = 1552303, upload-time = "2026-01-03T17:31:08.958Z" }, + { url = "https://files.pythonhosted.org/packages/ab/6a/28ed4dea1759916090587d1fe57087b03e6c784a642b85ef48217b0277ae/aiohttp-3.13.3-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:7bfdc049127717581866fa4708791220970ce291c23e28ccf3922c700740fdc0", size = 1763673, upload-time = "2026-01-03T17:31:10.676Z" }, + { url = 
"https://files.pythonhosted.org/packages/e8/35/4a3daeb8b9fab49240d21c04d50732313295e4bd813a465d840236dd0ce1/aiohttp-3.13.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8057c98e0c8472d8846b9c79f56766bcc57e3e8ac7bfd510482332366c56c591", size = 1721120, upload-time = "2026-01-03T17:31:12.575Z" }, + { url = "https://files.pythonhosted.org/packages/bc/9f/d643bb3c5fb99547323e635e251c609fbbc660d983144cfebec529e09264/aiohttp-3.13.3-cp313-cp313-win32.whl", hash = "sha256:1449ceddcdbcf2e0446957863af03ebaaa03f94c090f945411b61269e2cb5daf", size = 427383, upload-time = "2026-01-03T17:31:14.382Z" }, + { url = "https://files.pythonhosted.org/packages/4e/f1/ab0395f8a79933577cdd996dd2f9aa6014af9535f65dddcf88204682fe62/aiohttp-3.13.3-cp313-cp313-win_amd64.whl", hash = "sha256:693781c45a4033d31d4187d2436f5ac701e7bbfe5df40d917736108c1cc7436e", size = 453899, upload-time = "2026-01-03T17:31:15.958Z" }, + { url = "https://files.pythonhosted.org/packages/99/36/5b6514a9f5d66f4e2597e40dea2e3db271e023eb7a5d22defe96ba560996/aiohttp-3.13.3-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:ea37047c6b367fd4bd632bff8077449b8fa034b69e812a18e0132a00fae6e808", size = 737238, upload-time = "2026-01-03T17:31:17.909Z" }, + { url = "https://files.pythonhosted.org/packages/f7/49/459327f0d5bcd8c6c9ca69e60fdeebc3622861e696490d8674a6d0cb90a6/aiohttp-3.13.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:6fc0e2337d1a4c3e6acafda6a78a39d4c14caea625124817420abceed36e2415", size = 492292, upload-time = "2026-01-03T17:31:19.919Z" }, + { url = "https://files.pythonhosted.org/packages/e8/0b/b97660c5fd05d3495b4eb27f2d0ef18dc1dc4eff7511a9bf371397ff0264/aiohttp-3.13.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c685f2d80bb67ca8c3837823ad76196b3694b0159d232206d1e461d3d434666f", size = 493021, upload-time = "2026-01-03T17:31:21.636Z" }, + { url = "https://files.pythonhosted.org/packages/54/d4/438efabdf74e30aeceb890c3290bbaa449780583b1270b00661126b8aae4/aiohttp-3.13.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:48e377758516d262bde50c2584fc6c578af272559c409eecbdd2bae1601184d6", size = 1717263, upload-time = "2026-01-03T17:31:23.296Z" }, + { url = "https://files.pythonhosted.org/packages/71/f2/7bddc7fd612367d1459c5bcf598a9e8f7092d6580d98de0e057eb42697ad/aiohttp-3.13.3-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:34749271508078b261c4abb1767d42b8d0c0cc9449c73a4df494777dc55f0687", size = 1669107, upload-time = "2026-01-03T17:31:25.334Z" }, + { url = "https://files.pythonhosted.org/packages/00/5a/1aeaecca40e22560f97610a329e0e5efef5e0b5afdf9f857f0d93839ab2e/aiohttp-3.13.3-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:82611aeec80eb144416956ec85b6ca45a64d76429c1ed46ae1b5f86c6e0c9a26", size = 1760196, upload-time = "2026-01-03T17:31:27.394Z" }, + { url = "https://files.pythonhosted.org/packages/f8/f8/0ff6992bea7bd560fc510ea1c815f87eedd745fe035589c71ce05612a19a/aiohttp-3.13.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2fff83cfc93f18f215896e3a190e8e5cb413ce01553901aca925176e7568963a", size = 1843591, upload-time = "2026-01-03T17:31:29.238Z" }, + { url = "https://files.pythonhosted.org/packages/e3/d1/e30e537a15f53485b61f5be525f2157da719819e8377298502aebac45536/aiohttp-3.13.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = 
"sha256:bbe7d4cecacb439e2e2a8a1a7b935c25b812af7a5fd26503a66dadf428e79ec1", size = 1720277, upload-time = "2026-01-03T17:31:31.053Z" }, + { url = "https://files.pythonhosted.org/packages/84/45/23f4c451d8192f553d38d838831ebbc156907ea6e05557f39563101b7717/aiohttp-3.13.3-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b928f30fe49574253644b1ca44b1b8adbd903aa0da4b9054a6c20fc7f4092a25", size = 1548575, upload-time = "2026-01-03T17:31:32.87Z" }, + { url = "https://files.pythonhosted.org/packages/6a/ed/0a42b127a43712eda7807e7892c083eadfaf8429ca8fb619662a530a3aab/aiohttp-3.13.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7b5e8fe4de30df199155baaf64f2fcd604f4c678ed20910db8e2c66dc4b11603", size = 1679455, upload-time = "2026-01-03T17:31:34.76Z" }, + { url = "https://files.pythonhosted.org/packages/2e/b5/c05f0c2b4b4fe2c9d55e73b6d3ed4fd6c9dc2684b1d81cbdf77e7fad9adb/aiohttp-3.13.3-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:8542f41a62bcc58fc7f11cf7c90e0ec324ce44950003feb70640fc2a9092c32a", size = 1687417, upload-time = "2026-01-03T17:31:36.699Z" }, + { url = "https://files.pythonhosted.org/packages/c9/6b/915bc5dad66aef602b9e459b5a973529304d4e89ca86999d9d75d80cbd0b/aiohttp-3.13.3-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:5e1d8c8b8f1d91cd08d8f4a3c2b067bfca6ec043d3ff36de0f3a715feeedf926", size = 1729968, upload-time = "2026-01-03T17:31:38.622Z" }, + { url = "https://files.pythonhosted.org/packages/11/3b/e84581290a9520024a08640b63d07673057aec5ca548177a82026187ba73/aiohttp-3.13.3-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:90455115e5da1c3c51ab619ac57f877da8fd6d73c05aacd125c5ae9819582aba", size = 1545690, upload-time = "2026-01-03T17:31:40.57Z" }, + { url = "https://files.pythonhosted.org/packages/f5/04/0c3655a566c43fd647c81b895dfe361b9f9ad6d58c19309d45cff52d6c3b/aiohttp-3.13.3-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:042e9e0bcb5fba81886c8b4fbb9a09d6b8a00245fd8d88e4d989c1f96c74164c", size = 1746390, upload-time = "2026-01-03T17:31:42.857Z" }, + { url = "https://files.pythonhosted.org/packages/1f/53/71165b26978f719c3419381514c9690bd5980e764a09440a10bb816ea4ab/aiohttp-3.13.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2eb752b102b12a76ca02dff751a801f028b4ffbbc478840b473597fc91a9ed43", size = 1702188, upload-time = "2026-01-03T17:31:44.984Z" }, + { url = "https://files.pythonhosted.org/packages/29/a7/cbe6c9e8e136314fa1980da388a59d2f35f35395948a08b6747baebb6aa6/aiohttp-3.13.3-cp314-cp314-win32.whl", hash = "sha256:b556c85915d8efaed322bf1bdae9486aa0f3f764195a0fb6ee962e5c71ef5ce1", size = 433126, upload-time = "2026-01-03T17:31:47.463Z" }, + { url = "https://files.pythonhosted.org/packages/de/56/982704adea7d3b16614fc5936014e9af85c0e34b58f9046655817f04306e/aiohttp-3.13.3-cp314-cp314-win_amd64.whl", hash = "sha256:9bf9f7a65e7aa20dd764151fb3d616c81088f91f8df39c3893a536e279b4b984", size = 459128, upload-time = "2026-01-03T17:31:49.2Z" }, + { url = "https://files.pythonhosted.org/packages/6c/2a/3c79b638a9c3d4658d345339d22070241ea341ed4e07b5ac60fb0f418003/aiohttp-3.13.3-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:05861afbbec40650d8a07ea324367cb93e9e8cc7762e04dd4405df99fa65159c", size = 769512, upload-time = "2026-01-03T17:31:51.134Z" }, + { url = "https://files.pythonhosted.org/packages/29/b9/3e5014d46c0ab0db8707e0ac2711ed28c4da0218c358a4e7c17bae0d8722/aiohttp-3.13.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:2fc82186fadc4a8316768d61f3722c230e2c1dcab4200d52d2ebdf2482e47592", size = 506444, upload-time = 
"2026-01-03T17:31:52.85Z" }, + { url = "https://files.pythonhosted.org/packages/90/03/c1d4ef9a054e151cd7839cdc497f2638f00b93cbe8043983986630d7a80c/aiohttp-3.13.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:0add0900ff220d1d5c5ebbf99ed88b0c1bbf87aa7e4262300ed1376a6b13414f", size = 510798, upload-time = "2026-01-03T17:31:54.91Z" }, + { url = "https://files.pythonhosted.org/packages/ea/76/8c1e5abbfe8e127c893fe7ead569148a4d5a799f7cf958d8c09f3eedf097/aiohttp-3.13.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:568f416a4072fbfae453dcf9a99194bbb8bdeab718e08ee13dfa2ba0e4bebf29", size = 1868835, upload-time = "2026-01-03T17:31:56.733Z" }, + { url = "https://files.pythonhosted.org/packages/8e/ac/984c5a6f74c363b01ff97adc96a3976d9c98940b8969a1881575b279ac5d/aiohttp-3.13.3-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:add1da70de90a2569c5e15249ff76a631ccacfe198375eead4aadf3b8dc849dc", size = 1720486, upload-time = "2026-01-03T17:31:58.65Z" }, + { url = "https://files.pythonhosted.org/packages/b2/9a/b7039c5f099c4eb632138728828b33428585031a1e658d693d41d07d89d1/aiohttp-3.13.3-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:10b47b7ba335d2e9b1239fa571131a87e2d8ec96b333e68b2a305e7a98b0bae2", size = 1847951, upload-time = "2026-01-03T17:32:00.989Z" }, + { url = "https://files.pythonhosted.org/packages/3c/02/3bec2b9a1ba3c19ff89a43a19324202b8eb187ca1e928d8bdac9bbdddebd/aiohttp-3.13.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:3dd4dce1c718e38081c8f35f323209d4c1df7d4db4bab1b5c88a6b4d12b74587", size = 1941001, upload-time = "2026-01-03T17:32:03.122Z" }, + { url = "https://files.pythonhosted.org/packages/37/df/d879401cedeef27ac4717f6426c8c36c3091c6e9f08a9178cc87549c537f/aiohttp-3.13.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:34bac00a67a812570d4a460447e1e9e06fae622946955f939051e7cc895cfab8", size = 1797246, upload-time = "2026-01-03T17:32:05.255Z" }, + { url = "https://files.pythonhosted.org/packages/8d/15/be122de1f67e6953add23335c8ece6d314ab67c8bebb3f181063010795a7/aiohttp-3.13.3-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a19884d2ee70b06d9204b2727a7b9f983d0c684c650254679e716b0b77920632", size = 1627131, upload-time = "2026-01-03T17:32:07.607Z" }, + { url = "https://files.pythonhosted.org/packages/12/12/70eedcac9134cfa3219ab7af31ea56bc877395b1ac30d65b1bc4b27d0438/aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5f8ca7f2bb6ba8348a3614c7918cc4bb73268c5ac2a207576b7afea19d3d9f64", size = 1795196, upload-time = "2026-01-03T17:32:09.59Z" }, + { url = "https://files.pythonhosted.org/packages/32/11/b30e1b1cd1f3054af86ebe60df96989c6a414dd87e27ad16950eee420bea/aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:b0d95340658b9d2f11d9697f59b3814a9d3bb4b7a7c20b131df4bcef464037c0", size = 1782841, upload-time = "2026-01-03T17:32:11.445Z" }, + { url = "https://files.pythonhosted.org/packages/88/0d/d98a9367b38912384a17e287850f5695c528cff0f14f791ce8ee2e4f7796/aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:a1e53262fd202e4b40b70c3aff944a8155059beedc8a89bba9dc1f9ef06a1b56", size = 1795193, upload-time = "2026-01-03T17:32:13.705Z" }, + { url = 
"https://files.pythonhosted.org/packages/43/a5/a2dfd1f5ff5581632c7f6a30e1744deda03808974f94f6534241ef60c751/aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:d60ac9663f44168038586cab2157e122e46bdef09e9368b37f2d82d354c23f72", size = 1621979, upload-time = "2026-01-03T17:32:15.965Z" }, + { url = "https://files.pythonhosted.org/packages/fa/f0/12973c382ae7c1cccbc4417e129c5bf54c374dfb85af70893646e1f0e749/aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:90751b8eed69435bac9ff4e3d2f6b3af1f57e37ecb0fbeee59c0174c9e2d41df", size = 1822193, upload-time = "2026-01-03T17:32:18.219Z" }, + { url = "https://files.pythonhosted.org/packages/3c/5f/24155e30ba7f8c96918af1350eb0663e2430aad9e001c0489d89cd708ab1/aiohttp-3.13.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:fc353029f176fd2b3ec6cfc71be166aba1936fe5d73dd1992ce289ca6647a9aa", size = 1769801, upload-time = "2026-01-03T17:32:20.25Z" }, + { url = "https://files.pythonhosted.org/packages/eb/f8/7314031ff5c10e6ece114da79b338ec17eeff3a079e53151f7e9f43c4723/aiohttp-3.13.3-cp314-cp314t-win32.whl", hash = "sha256:2e41b18a58da1e474a057b3d35248d8320029f61d70a37629535b16a0c8f3767", size = 466523, upload-time = "2026-01-03T17:32:22.215Z" }, + { url = "https://files.pythonhosted.org/packages/b4/63/278a98c715ae467624eafe375542d8ba9b4383a016df8fdefe0ae28382a7/aiohttp-3.13.3-cp314-cp314t-win_amd64.whl", hash = "sha256:44531a36aa2264a1860089ffd4dce7baf875ee5a6079d5fb42e261c704ef7344", size = 499694, upload-time = "2026-01-03T17:32:24.546Z" }, ] [[package]] @@ -441,14 +439,14 @@ wheels = [ [[package]] name = "authlib" -version = "1.6.5" +version = "1.6.6" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cryptography" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/cd/3f/1d3bbd0bf23bdd99276d4def22f29c27a914067b4cf66f753ff9b8bbd0f3/authlib-1.6.5.tar.gz", hash = "sha256:6aaf9c79b7cc96c900f0b284061691c5d4e61221640a948fe690b556a6d6d10b", size = 164553, upload-time = "2025-10-02T13:36:09.489Z" } +sdist = { url = "https://files.pythonhosted.org/packages/bb/9b/b1661026ff24bc641b76b78c5222d614776b0c085bcfdac9bd15a1cb4b35/authlib-1.6.6.tar.gz", hash = "sha256:45770e8e056d0f283451d9996fbb59b70d45722b45d854d58f32878d0a40c38e", size = 164894, upload-time = "2025-12-12T08:01:41.464Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/f8/aa/5082412d1ee302e9e7d80b6949bc4d2a8fa1149aaab610c5fc24709605d6/authlib-1.6.5-py2.py3-none-any.whl", hash = "sha256:3e0e0507807f842b02175507bdee8957a1d5707fd4afb17c32fb43fee90b6e3a", size = 243608, upload-time = "2025-10-02T13:36:07.637Z" }, + { url = "https://files.pythonhosted.org/packages/54/51/321e821856452f7386c4e9df866f196720b1ad0c5ea1623ea7399969ae3b/authlib-1.6.6-py2.py3-none-any.whl", hash = "sha256:7d9e9bc535c13974313a87f53e8430eb6ea3d1cf6ae4f6efcd793f2e949143fd", size = 244005, upload-time = "2025-12-12T08:01:40.209Z" }, ] [[package]] @@ -930,7 +928,7 @@ wheels = [ [[package]] name = "datasets" -version = "4.3.0" +version = "4.4.2" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "dill" }, @@ -948,9 +946,9 @@ dependencies = [ { name = "tqdm" }, { name = "xxhash" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/2a/47/325206ac160f7699ed9f1798afa8f8f8d5189b03bf3815654859ac1d5cba/datasets-4.3.0.tar.gz", hash = "sha256:bc9118ed9afd92346c5be7ed3aaa00177eb907c25467f9d072a0d22777efbd2b", size = 582801, upload-time = "2025-10-23T16:31:51.547Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/c4/54/9359803da96bc65439a28fbb014dc2c90b7d4d8034a93b72362b0d40191f/datasets-4.4.2.tar.gz", hash = "sha256:9de16e415c4ba4713eac0493f7c7dc74f3aa21599297f00cc6ddab409cb7b24b", size = 586474, upload-time = "2025-12-19T15:03:09.129Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ca/51/409a8184ed35453d9cbb3d6b20d524b1115c2c2d117b85d5e9b06cd70b45/datasets-4.3.0-py3-none-any.whl", hash = "sha256:0ea157e72138b3ca6c7d2415f19a164ecf7d4c4fa72da2a570da286882e96903", size = 506846, upload-time = "2025-10-23T16:31:49.965Z" }, + { url = "https://files.pythonhosted.org/packages/7b/b5/fefa518c809de7bced5cddb7c21c010da66fa2ae494bda96844a280cc6ce/datasets-4.4.2-py3-none-any.whl", hash = "sha256:6f5ef3417504d9cd663c71c1b90b9a494ff4c2076a2cd6a6e40ceee6ad95befc", size = 512268, upload-time = "2025-12-19T15:03:07.087Z" }, ] [[package]] @@ -1189,11 +1187,11 @@ wheels = [ [[package]] name = "filelock" -version = "3.20.1" +version = "3.20.3" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/a7/23/ce7a1126827cedeb958fc043d61745754464eb56c5937c35bbf2b8e26f34/filelock-3.20.1.tar.gz", hash = "sha256:b8360948b351b80f420878d8516519a2204b07aefcdcfd24912a5d33127f188c", size = 19476, upload-time = "2025-12-15T23:54:28.027Z" } +sdist = { url = "https://files.pythonhosted.org/packages/1d/65/ce7f1b70157833bf3cb851b556a37d4547ceafc158aa9b34b36782f23696/filelock-3.20.3.tar.gz", hash = "sha256:18c57ee915c7ec61cff0ecf7f0f869936c7c30191bb0cf406f1341778d0834e1", size = 19485, upload-time = "2026-01-09T17:55:05.421Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e3/7f/a1a97644e39e7316d850784c642093c99df1290a460df4ede27659056834/filelock-3.20.1-py3-none-any.whl", hash = "sha256:15d9e9a67306188a44baa72f569d2bfd803076269365fdea0934385da4dc361a", size = 16666, upload-time = "2025-12-15T23:54:26.874Z" }, + { url = "https://files.pythonhosted.org/packages/b5/36/7fb70f04bf00bc646cd5bb45aa9eddb15e19437a28b8fb2b4a5249fac770/filelock-3.20.3-py3-none-any.whl", hash = "sha256:4b0dda527ee31078689fc205ec4f1c1bf7d56cf88b6dc9426c4f230e46c2dce1", size = 16701, upload-time = "2026-01-09T17:55:04.334Z" }, ] [[package]] @@ -1519,7 +1517,7 @@ grpc = [ [[package]] name = "gradio" -version = "5.49.1" +version = "6.1.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiofiles" }, @@ -1543,7 +1541,6 @@ dependencies = [ { name = "pydub" }, { name = "python-multipart" }, { name = "pyyaml" }, - { name = "ruff" }, { name = "safehttpx" }, { name = "semantic-version" }, { name = "starlette" }, @@ -1552,14 +1549,14 @@ dependencies = [ { name = "typing-extensions" }, { name = "uvicorn" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/83/67/17b3969a686f204dfb8f06bd34d1423bcba1df8a2f3674f115ca427188b7/gradio-5.49.1.tar.gz", hash = "sha256:c06faa324ae06c3892c8b4b4e73c706c4520d380f6b9e52a3c02dc53a7627ba9", size = 73784504, upload-time = "2025-10-08T20:18:40.4Z" } +sdist = { url = "https://files.pythonhosted.org/packages/4b/cb/ce9c99e4026c7daefef2fe6736207a9189571ddefc1277438103e3e413f2/gradio-6.1.0.tar.gz", hash = "sha256:fe9f6757d53ce7840b487a6921151d8c3410f7de6e2152a4407c5eded9ce023a", size = 37852914, upload-time = "2025-12-09T19:31:53.996Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/8d/95/1c25fbcabfa201ab79b016c8716a4ac0f846121d4bbfd2136ffb6d87f31e/gradio-5.49.1-py3-none-any.whl", hash = "sha256:1b19369387801a26a6ba7fd2f74d46c5b0e2ac9ddef14f24ddc0d11fb19421b7", size = 
63523840, upload-time = "2025-10-08T20:18:34.585Z" }, + { url = "https://files.pythonhosted.org/packages/ae/00/592f02d2f8a815fc3370f3cda70fb2116d6ec31cf3fe33c87fd34d0a1778/gradio-6.1.0-py3-none-any.whl", hash = "sha256:528f17d75c8206da77a4646955678df8a786145b7bdfcba61d14b2fb3cb94b98", size = 22967810, upload-time = "2025-12-09T19:31:51.335Z" }, ] [[package]] name = "gradio-client" -version = "1.13.3" +version = "2.0.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "fsspec" }, @@ -1567,11 +1564,10 @@ dependencies = [ { name = "huggingface-hub" }, { name = "packaging" }, { name = "typing-extensions" }, - { name = "websockets" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/3e/a9/a3beb0ece8c05c33e6376b790fa42e0dd157abca8220cf639b249a597467/gradio_client-1.13.3.tar.gz", hash = "sha256:869b3e67e0f7a0f40df8c48c94de99183265cf4b7b1d9bd4623e336d219ffbe7", size = 323253, upload-time = "2025-09-26T19:51:21.7Z" } +sdist = { url = "https://files.pythonhosted.org/packages/4e/cc/b0f04b1c9bf79c7ae9840b9945f5fbd93355719684f83032837695ab1eaf/gradio_client-2.0.1.tar.gz", hash = "sha256:087eb50652370747c0ce66cd0ae79ecb49f9682188d5348e279d44602cbc2814", size = 54792, upload-time = "2025-12-02T01:57:58.685Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/6e/0b/337b74504681b5dde39f20d803bb09757f9973ecdc65fd4e819d4b11faf7/gradio_client-1.13.3-py3-none-any.whl", hash = "sha256:3f63e4d33a2899c1a12b10fe3cf77b82a6919ff1a1fb6391f6aa225811aa390c", size = 325350, upload-time = "2025-09-26T19:51:20.288Z" }, + { url = "https://files.pythonhosted.org/packages/1b/11/d680ecf4073bd1cacfe9dea57fa95660e4ea2d1fff3125dbaaa902cc9095/gradio_client-2.0.1-py3-none-any.whl", hash = "sha256:6322eecb5963a07703306c0b048bb98518063d05ca99a65fe384417188af8c63", size = 55439, upload-time = "2025-12-02T01:57:57.551Z" }, ] [[package]] @@ -5108,11 +5104,11 @@ wheels = [ [[package]] name = "urllib3" -version = "2.6.1" +version = "2.6.3" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/5e/1d/0f3a93cca1ac5e8287842ed4eebbd0f7a991315089b1a0b01c7788aa7b63/urllib3-2.6.1.tar.gz", hash = "sha256:5379eb6e1aba4088bae84f8242960017ec8d8e3decf30480b3a1abdaa9671a3f", size = 432678, upload-time = "2025-12-08T15:25:26.773Z" } +sdist = { url = "https://files.pythonhosted.org/packages/c7/24/5f1b3bdffd70275f6661c76461e25f024d5a38a46f04aaca912426a2b1d3/urllib3-2.6.3.tar.gz", hash = "sha256:1b62b6884944a57dbe321509ab94fd4d3b307075e0c2eae991ac71ee15ad38ed", size = 435556, upload-time = "2026-01-07T16:24:43.925Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/bc/56/190ceb8cb10511b730b564fb1e0293fa468363dbad26145c34928a60cb0c/urllib3-2.6.1-py3-none-any.whl", hash = "sha256:e67d06fe947c36a7ca39f4994b08d73922d40e6cca949907be05efa6fd75110b", size = 131138, upload-time = "2025-12-08T15:25:25.51Z" }, + { url = "https://files.pythonhosted.org/packages/39/08/aaaad47bc4e9dc8c725e68f9d04865dbcb2052843ff09c97b08904852d84/urllib3-2.6.3-py3-none-any.whl", hash = "sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4", size = 131584, upload-time = "2026-01-07T16:24:42.685Z" }, ] [[package]] @@ -5182,16 +5178,16 @@ wheels = [ [[package]] name = "virtualenv" -version = "20.35.4" +version = "20.36.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "distlib" }, { name = "filelock" }, { name = "platformdirs" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/20/28/e6f1a6f655d620846bd9df527390ecc26b3805a0c5989048c210e22c5ca9/virtualenv-20.35.4.tar.gz", hash = "sha256:643d3914d73d3eeb0c552cbb12d7e82adf0e504dbf86a3182f8771a153a1971c", size = 6028799, upload-time = "2025-10-29T06:57:40.511Z" } +sdist = { url = "https://files.pythonhosted.org/packages/aa/a3/4d310fa5f00863544e1d0f4de93bddec248499ccf97d4791bc3122c9d4f3/virtualenv-20.36.1.tar.gz", hash = "sha256:8befb5c81842c641f8ee658481e42641c68b5eab3521d8e092d18320902466ba", size = 6032239, upload-time = "2026-01-09T18:21:01.296Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/79/0c/c05523fa3181fdf0c9c52a6ba91a23fbf3246cc095f26f6516f9c60e6771/virtualenv-20.35.4-py3-none-any.whl", hash = "sha256:c21c9cede36c9753eeade68ba7d523529f228a403463376cf821eaae2b650f1b", size = 6005095, upload-time = "2025-10-29T06:57:37.598Z" }, + { url = "https://files.pythonhosted.org/packages/6a/2a/dc2228b2888f51192c7dc766106cd475f1b768c10caaf9727659726f7391/virtualenv-20.36.1-py3-none-any.whl", hash = "sha256:575a8d6b124ef88f6f51d56d656132389f961062a9177016a50e4f507bbcc19f", size = 6008258, upload-time = "2026-01-09T18:20:59.425Z" }, ] [[package]]