diff --git a/src/google/adk/agents/remote_a2a_agent.py b/src/google/adk/agents/remote_a2a_agent.py
index 7c894d54a3..2da7a4faa0 100644
--- a/src/google/adk/agents/remote_a2a_agent.py
+++ b/src/google/adk/agents/remote_a2a_agent.py
@@ -134,7 +134,7 @@ def __init__(
     Args:
       name: Agent name (must be unique identifier)
       agent_card: AgentCard object, URL string, or file path string
-      description: Agent description (auto-populated from card if empty)
+      description: Agent description (autopopulated from card if empty)
       httpx_client: Optional shared HTTP client (will create own if not
         provided) [deprecated] Use a2a_client_factory instead.
       timeout: HTTP timeout in seconds
diff --git a/src/google/adk/cli/adk_web_server.py b/src/google/adk/cli/adk_web_server.py
index 4404d62e4e..b97932d042 100644
--- a/src/google/adk/cli/adk_web_server.py
+++ b/src/google/adk/cli/adk_web_server.py
@@ -395,7 +395,7 @@ def _setup_gcp_telemetry(
       # TODO - use trace_to_cloud here as well once otel_to_cloud is no
       # longer experimental.
       enable_cloud_tracing=True,
-      # TODO - reenable metrics once errors during shutdown are fixed.
+      # TODO - re-enable metrics once errors during shutdown are fixed.
       enable_cloud_metrics=False,
       enable_cloud_logging=True,
       google_auth=(credentials, project_id),
diff --git a/src/google/adk/cli/cli_tools_click.py b/src/google/adk/cli/cli_tools_click.py
index 6058a063db..c4f1d405ad 100644
--- a/src/google/adk/cli/cli_tools_click.py
+++ b/src/google/adk/cli/cli_tools_click.py
@@ -1299,7 +1299,7 @@ def cli_web(
 ):
   """Starts a FastAPI server with Web UI for agents.

-  AGENTS_DIR: The directory of agents, where each sub-directory is a single
+  AGENTS_DIR: The directory of agents, where each subdirectory is a single
   agent, containing at least `__init__.py` and `agent.py` files.

   Example:
@@ -1366,7 +1366,7 @@ async def _lifespan(app: FastAPI):

 @main.command("api_server")
 @feature_options()
-# The directory of agents, where each sub-directory is a single agent.
+# The directory of agents, where each subdirectory is a single agent.
 # By default, it is the current working directory
 @click.argument(
     "agents_dir",
@@ -1401,7 +1401,7 @@ def cli_api_server(
 ):
   """Starts a FastAPI server for agents.

-  AGENTS_DIR: The directory of agents, where each sub-directory is a single
+  AGENTS_DIR: The directory of agents, where each subdirectory is a single
   agent, containing at least `__init__.py` and `agent.py` files.

   Example:
diff --git a/src/google/adk/evaluation/eval_config.py b/src/google/adk/evaluation/eval_config.py
index 185f02cc20..ead2303ceb 100644
--- a/src/google/adk/evaluation/eval_config.py
+++ b/src/google/adk/evaluation/eval_config.py
@@ -89,7 +89,7 @@ class EvalConfig(BaseModel):
 In the sample below, `tool_trajectory_avg_score`, `response_match_score` and
 `final_response_match_v2` are the standard eval metric names, represented as
 keys in the dictionary. The values in the dictionary are the corresponding
-criterions. For the first two metrics, we use simple threshold as the criterion,
+criteria. For the first two metrics, we use simple threshold as the criterion,
 the third one uses `LlmAsAJudgeCriterion`.
 {
   "criteria": {
diff --git a/src/google/adk/evaluation/rubric_based_evaluator.py b/src/google/adk/evaluation/rubric_based_evaluator.py
index 63c1d47d00..451a14f1a5 100644
--- a/src/google/adk/evaluation/rubric_based_evaluator.py
+++ b/src/google/adk/evaluation/rubric_based_evaluator.py
@@ -93,7 +93,7 @@ class PerInvocationResultsAggregator(abc.ABC):
   """An interface for aggregating per invocation samples.

   AutoRaters that are backed by an LLM are known to have certain degree of
-  unreliabilty to their responses. In order to counter that we sample the
+  unreliability to their responses. In order to counter that we sample the
   autorater more than once for a single invocation. The aggregator helps
   convert those multiple samples into a single result.

@@ -419,7 +419,7 @@ def aggregate_per_invocation_samples(
     """Returns a combined result by aggregating multiple samples for the same invocation.

     AutoRaters that are backed by an LLM are known to have certain degree of
-    unreliabilty to their responses. In order to counter that we sample the
+    unreliability to their responses. In order to counter that we sample the
     autorater more than once for a single invocation. The aggregator helps
     convert those multiple samples into a single result.

diff --git a/src/google/adk/evaluation/simulation/per_turn_user_simulator_quality_v1.py b/src/google/adk/evaluation/simulation/per_turn_user_simulator_quality_v1.py
index 5d6208a10a..ade65d72a7 100644
--- a/src/google/adk/evaluation/simulation/per_turn_user_simulator_quality_v1.py
+++ b/src/google/adk/evaluation/simulation/per_turn_user_simulator_quality_v1.py
@@ -58,7 +58,7 @@

 # Definition of Conversation History
 The Conversation History is the actual dialogue between the User Simulator and the Agent.
-The Conversation History may not be complete, but the exsisting dialogue should adhere to the Conversation Plan.
+The Conversation History may not be complete, but the existing dialogue should adhere to the Conversation Plan.
 The Conversation History may contain instances where the User Simulator troubleshoots an incorrect/inappropriate response from the Agent in order to enforce the Conversation Plan.
 The Conversation History is finished only when the User Simulator outputs `{stop_signal}` in its response.
 If this token is missing, the conversation between the User Simulator and the Agent has not finished, and more turns can be generated.
@@ -171,7 +171,7 @@ def _parse_llm_response(response: str) -> Label:
       response,
   )

-  # If there was not match for "is_valid", return NOT_FOUND
+  # If there was no match for "is_valid", return NOT_FOUND
   if is_valid_match is None:
     return Label.NOT_FOUND

diff --git a/src/google/adk/flows/llm_flows/functions.py b/src/google/adk/flows/llm_flows/functions.py
index ec1cd2300a..e5f29372fa 100644
--- a/src/google/adk/flows/llm_flows/functions.py
+++ b/src/google/adk/flows/llm_flows/functions.py
@@ -410,7 +410,7 @@ async def _run_with_trace():
         function_response = altered_function_response

       if tool.is_long_running:
-        # Allow long running function to return None to not provide function
+        # Allow long-running function to return None to not provide function
         # response.
         if not function_response:
           return None
@@ -893,7 +893,7 @@ def find_matching_function_call(
   )
   for i in range(len(events) - 2, -1, -1):
     event = events[i]
-    # looking for the system long running request euc function call
+    # looking for the system long-running request euc function call
     function_calls = event.get_function_calls()
     if not function_calls:
       continue
diff --git a/src/google/adk/memory/vertex_ai_rag_memory_service.py b/src/google/adk/memory/vertex_ai_rag_memory_service.py
index bd6e9dc2b0..001b30b541 100644
--- a/src/google/adk/memory/vertex_ai_rag_memory_service.py
+++ b/src/google/adk/memory/vertex_ai_rag_memory_service.py
@@ -52,7 +52,7 @@ def __init__(
         or ``{rag_corpus_id}``
       similarity_top_k: The number of contexts to retrieve.
       vector_distance_threshold: Only returns contexts with vector distance
-        smaller than the threshold..
+        smaller than the threshold.
     """
     self._vertex_rag_store = types.VertexRagStore(
         rag_resources=[
diff --git a/src/google/adk/models/google_llm.py b/src/google/adk/models/google_llm.py
index 733719b422..9015e4b4ee 100644
--- a/src/google/adk/models/google_llm.py
+++ b/src/google/adk/models/google_llm.py
@@ -58,7 +58,7 @@


 class _ResourceExhaustedError(ClientError):
-  """Represents an resources exhausted error received from the Model."""
+  """Represents a resources exhausted error received from the Model."""

   def __init__(
       self,
diff --git a/src/google/adk/models/lite_llm.py b/src/google/adk/models/lite_llm.py
index 4131e47b8c..79182d7b0a 100644
--- a/src/google/adk/models/lite_llm.py
+++ b/src/google/adk/models/lite_llm.py
@@ -152,7 +152,7 @@
 def _ensure_litellm_imported() -> None:
   """Imports LiteLLM with safe defaults.

-  LiteLLM defaults to DEV mode, which auto-loads a local `.env` at import time.
+  LiteLLM defaults to DEV mode, which autoloads a local `.env` at import time.
   ADK should not implicitly load `.env` just because LiteLLM is installed.

   Users can opt into LiteLLM's default behavior by setting LITELLM_MODE=DEV.
diff --git a/src/google/adk/plugins/base_plugin.py b/src/google/adk/plugins/base_plugin.py
index 2912ba0a7e..3639f61aa2 100644
--- a/src/google/adk/plugins/base_plugin.py
+++ b/src/google/adk/plugins/base_plugin.py
@@ -68,7 +68,7 @@ class BasePlugin(ABC):
   callback in the chain. For example, if a plugin modifies the tool input
   with before_tool_callback, the modified tool input will be passed to the
   before_tool_callback of the next plugin, and further passed to the agent
-  callbacks if not short circuited.
+  callbacks if not short-circuited.

   To use a plugin, implement the desired callback methods and pass an
   instance of your custom plugin class to the ADK Runner.
diff --git a/src/google/adk/plugins/logging_plugin.py b/src/google/adk/plugins/logging_plugin.py
index f44a75e04b..df37ee7ee4 100644
--- a/src/google/adk/plugins/logging_plugin.py
+++ b/src/google/adk/plugins/logging_plugin.py
@@ -36,7 +36,7 @@
 class LoggingPlugin(BasePlugin):
   """A plugin that logs important information at each callback point.

-  This plugin helps printing all critical events in the console. It is not a
+  This plugin helps print all critical events in the console. It is not a
   replacement of existing logging in ADK. It rather helps terminal based
   debugging by showing all logs in the console, and serves as a simple demo
   for everyone to leverage when developing new plugins.
diff --git a/src/google/adk/runners.py b/src/google/adk/runners.py
index b931561c4d..3aaa54e257 100644
--- a/src/google/adk/runners.py
+++ b/src/google/adk/runners.py
@@ -760,7 +760,7 @@ async def _exec_with_plugin(
       else:
         # Step 2: Otherwise continue with normal execution
        # Note for live/bidi:
-        # the transcription may arrive later then the action(function call
+        # the transcription may arrive later than the action(function call
         # event and thus function response event). In this case, the order of
         # transcription and function call event will be wrong if we just
         # append as it arrives. To address this, we should check if there is
@@ -770,7 +770,7 @@ async def _exec_with_plugin(
         # identified by checking if the transcription event is partial. When
         # the next transcription event is not partial, it means the previous
         # transcription is finished. Then if there is any buffered function
-        # call event, we should append them after this finished(non-parital)
+        # call event, we should append them after this finished(non-partial)
         # transcription event.
         buffered_events: list[Event] = []
         is_transcribing: bool = False
@@ -789,7 +789,7 @@ async def _exec_with_plugin(
             buffered_events.append(event)
             continue
           # Note for live/bidi: for audio response, it's considered as
-          # non-paritla event(event.partial=None)
+          # non-partial event(event.partial=None)
           # event.partial=False and event.partial=None are considered as
           # non-partial event; event.partial=True is considered as partial
           # event.
@@ -938,7 +938,7 @@ async def run_live(
     * **Live Model Audio Events with Inline Data:** Events containing raw
       audio `Blob` data(`inline_data`).
     * **Live Model Audio Events with File Data:** Both input and ouput audio
-      data are aggregated into a audio file saved into artifacts. The
+      data are aggregated into an audio file saved into artifacts. The
       reference to the file is saved in the event as `file_data`.
     * **Usage Metadata:** Events containing token usage.
     * **Transcription Events:** Both partial and non-partial transcription
@@ -948,7 +948,7 @@ async def run_live(
     **Events Saved to the Session:**

     * **Live Model Audio Events with File Data:** Both input and ouput audio
-      data are aggregated into a audio file saved into artifacts. The
+      data are aggregated into an audio file saved into artifacts. The
       reference to the file is saved as event in the `file_data` to session
       if RunConfig.save_live_model_audio_to_session is True.
     * **Usage Metadata Events:** Saved to the session.
@@ -1099,7 +1099,7 @@ def _find_agent_to_run(
     # If the last event is a function response, should send this response to
     # the agent that returned the corresponding function call regardless the
     # type of the agent. e.g. a remote a2a agent may surface a credential
-    # request as a special long running function tool call.
+    # request as a special long-running function tool call.
     event = find_matching_function_call(session.events)
     if event and event.author:
       return root_agent.find_agent(event.author)
diff --git a/src/google/adk/sessions/migration/migration_runner.py b/src/google/adk/sessions/migration/migration_runner.py
index 56acd810fb..edb5c83bcb 100644
--- a/src/google/adk/sessions/migration/migration_runner.py
+++ b/src/google/adk/sessions/migration/migration_runner.py
@@ -49,7 +49,7 @@ def upgrade(source_db_url: str, dest_db_url: str):
   LATEST_VERSION.

   If multiple migration steps are required, intermediate results are stored in
-  temporary SQLite database files. This means a multi-step migration
+  temporary SQLite database files. This means a multistep migration
   between other database types (e.g. PostgreSQL to PostgreSQL) will use
   SQLite for intermediate steps.

diff --git a/tests/unittests/models/test_litellm.py b/tests/unittests/models/test_litellm.py
index 5ff541b834..2ebbc5dfe8 100644
--- a/tests/unittests/models/test_litellm.py
+++ b/tests/unittests/models/test_litellm.py
@@ -10,7 +10,7 @@
 # distributed under the License is distributed on an "AS IS" BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
-# limitations under the Licens
+# limitations under the License

 import contextlib
 import json
diff --git a/tests/unittests/runners/test_runner_rewind.py b/tests/unittests/runners/test_runner_rewind.py
index f8010238bd..035d28437b 100644
--- a/tests/unittests/runners/test_runner_rewind.py
+++ b/tests/unittests/runners/test_runner_rewind.py
@@ -130,14 +130,14 @@ async def test_rewind_async_with_state_and_artifacts(self):
         rewind_before_invocation_id="invocation2",
     )

-    # 3. Verify state and artifacts are rewinded
+    # 3. Verify state and artifacts are rewound
     session = await runner.session_service.get_session(
         app_name=runner.app_name, user_id=user_id, session_id=session_id
     )
     # After rewind before invocation2, only event1 state delta should apply.
     assert session.state["k1"] == "v1"
     assert not session.state["k2"]
-    # f1 should be rewinded to v0
+    # f1 should be rewound to v0
     assert await runner.artifact_service.load_artifact(
         app_name=runner.app_name,
         user_id=user_id,
@@ -226,7 +226,7 @@ async def test_rewind_async_not_first_invocation(self):
         rewind_before_invocation_id="invocation3",
     )

-    # 3. Verify state and artifacts are rewinded
+    # 3. Verify state and artifacts are rewound
     session = await runner.session_service.get_session(
         app_name=runner.app_name, user_id=user_id, session_id=session_id
     )