2 changes: 1 addition & 1 deletion src/google/adk/agents/remote_a2a_agent.py
@@ -134,7 +134,7 @@ def __init__(
Args:
name: Agent name (must be unique identifier)
agent_card: AgentCard object, URL string, or file path string
description: Agent description (auto-populated from card if empty)
description: Agent description (autopopulated from card if empty)
httpx_client: Optional shared HTTP client (will create own if not
provided) [deprecated] Use a2a_client_factory instead.
timeout: HTTP timeout in seconds
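
For context, a minimal construction sketch matching the Args documented above. Only the keyword names shown in the docstring are used; the agent-card URL is a hypothetical placeholder, not a real endpoint.

from google.adk.agents.remote_a2a_agent import RemoteA2aAgent

remote_agent = RemoteA2aAgent(
    name="billing_agent",
    agent_card="https://example.com/.well-known/agent-card.json",  # AgentCard object, URL, or file path
    description="",  # left empty so it is auto-populated from the card
    timeout=30.0,    # HTTP timeout in seconds
)
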
2 changes: 1 addition & 1 deletion src/google/adk/cli/adk_web_server.py
@@ -395,7 +395,7 @@ def _setup_gcp_telemetry(
# TODO - use trace_to_cloud here as well once otel_to_cloud is no
# longer experimental.
enable_cloud_tracing=True,
# TODO - reenable metrics once errors during shutdown are fixed.
# TODO - re-enable metrics once errors during shutdown are fixed.
enable_cloud_metrics=False,
enable_cloud_logging=True,
google_auth=(credentials, project_id),
6 changes: 3 additions & 3 deletions src/google/adk/cli/cli_tools_click.py
@@ -1299,7 +1299,7 @@ def cli_web(
):
"""Starts a FastAPI server with Web UI for agents.

AGENTS_DIR: The directory of agents, where each sub-directory is a single
AGENTS_DIR: The directory of agents, where each subdirectory is a single
agent, containing at least `__init__.py` and `agent.py` files.

Example:
@@ -1366,7 +1366,7 @@ async def _lifespan(app: FastAPI):

@main.command("api_server")
@feature_options()
# The directory of agents, where each sub-directory is a single agent.
# The directory of agents, where each subdirectory is a single agent.
# By default, it is the current working directory
@click.argument(
"agents_dir",
Expand Down Expand Up @@ -1401,7 +1401,7 @@ def cli_api_server(
):
"""Starts a FastAPI server for agents.

AGENTS_DIR: The directory of agents, where each sub-directory is a single
AGENTS_DIR: The directory of agents, where each subdirectory is a single
agent, containing at least `__init__.py` and `agent.py` files.

Example:
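
As a hedged illustration of the AGENTS_DIR convention described in both docstrings above (hypothetical directory names; the elided Example blocks are not reproduced):

agents/                  <- passed as AGENTS_DIR
  weather_agent/         <- one agent per subdirectory
    __init__.py
    agent.py
  support_agent/
    __init__.py
    agent.py

Serving such a directory would then look roughly like `adk web agents/` or `adk api_server agents/`, assuming the standard ADK CLI entry points.
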
2 changes: 1 addition & 1 deletion src/google/adk/evaluation/eval_config.py
@@ -89,7 +89,7 @@ class EvalConfig(BaseModel):
In the sample below, `tool_trajectory_avg_score`, `response_match_score` and
`final_response_match_v2` are the standard eval metric names, represented as
keys in the dictionary. The values in the dictionary are the corresponding
criterions. For the first two metrics, we use simple threshold as the criterion,
criteria. For the first two metrics, we use simple threshold as the criterion,
the third one uses `LlmAsAJudgeCriterion`.
{
"criteria": {
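
Since the sample dictionary above is cut off by the diff, here is a hedged sketch of the shape it describes; the threshold values are illustrative, and the fields of LlmAsAJudgeCriterion are deliberately not guessed.

criteria_sketch = {
    "criteria": {
        "tool_trajectory_avg_score": 1.0,  # simple threshold criterion
        "response_match_score": 0.8,       # simple threshold criterion
        # "final_response_match_v2" would map to an LlmAsAJudgeCriterion;
        # its exact fields are omitted here rather than assumed.
    }
}
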
4 changes: 2 additions & 2 deletions src/google/adk/evaluation/rubric_based_evaluator.py
@@ -93,7 +93,7 @@ class PerInvocationResultsAggregator(abc.ABC):
"""An interface for aggregating per invocation samples.

AutoRaters that are backed by an LLM are known to have certain degree of
unreliabilty to their responses. In order to counter that we sample the
unreliability to their responses. In order to counter that we sample the
autorater more than once for a single invocation.

The aggregator helps convert those multiple samples into a single result.
@@ -419,7 +419,7 @@ def aggregate_per_invocation_samples(
"""Returns a combined result by aggregating multiple samples for the same invocation.

AutoRaters that are backed by an LLM are known to have certain degree of
unreliabilty to their responses. In order to counter that we sample the
unreliability to their responses. In order to counter that we sample the
autorater more than once for a single invocation.

The aggregator helps convert those multiple samples into a single result.
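
A self-contained illustration of the idea in both docstrings above: sample the LLM-backed autorater several times for one invocation, then aggregate the samples (here by simple majority vote) to dampen its unreliability. The function below is a plain-Python sketch, not the ADK aggregator API.

from collections import Counter

def aggregate_by_majority(samples: list[bool]) -> bool:
    """Collapses several autorater verdicts for one invocation into one result."""
    return Counter(samples).most_common(1)[0][0]

# Three samples for the same invocation; two of three say the response is good.
print(aggregate_by_majority([True, True, False]))  # True
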
@@ -58,7 +58,7 @@

# Definition of Conversation History
The Conversation History is the actual dialogue between the User Simulator and the Agent.
The Conversation History may not be complete, but the exsisting dialogue should adhere to the Conversation Plan.
The Conversation History may not be complete, but the existing dialogue should adhere to the Conversation Plan.
The Conversation History may contain instances where the User Simulator troubleshoots an incorrect/inappropriate response from the Agent in order to enforce the Conversation Plan.
The Conversation History is finished only when the User Simulator outputs `{stop_signal}` in its response. If this token is missing, the conversation between the User Simulator and the Agent has not finished, and more turns can be generated.

@@ -171,7 +171,7 @@ def _parse_llm_response(response: str) -> Label:
response,
)

# If there was not match for "is_valid", return NOT_FOUND
# If there was no match for "is_valid", return NOT_FOUND
if is_valid_match is None:
return Label.NOT_FOUND

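
A hedged, stand-alone sketch of the parsing pattern shown in the `_parse_llm_response` hunk above: search the raw LLM response for an `is_valid` field and fall back to a NOT_FOUND label when no match exists. The enum and regex here are illustrative, not the actual ADK definitions.

import enum
import re

class Label(enum.Enum):
    VALID = "valid"
    INVALID = "invalid"
    NOT_FOUND = "not_found"

def parse_is_valid(response: str) -> Label:
    # If there is no match for "is_valid", return NOT_FOUND.
    match = re.search(r'"is_valid"\s*:\s*(true|false)', response, re.IGNORECASE)
    if match is None:
        return Label.NOT_FOUND
    return Label.VALID if match.group(1).lower() == "true" else Label.INVALID

print(parse_is_valid('{"is_valid": true}'))  # Label.VALID
print(parse_is_valid("no verdict here"))     # Label.NOT_FOUND
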
4 changes: 2 additions & 2 deletions src/google/adk/flows/llm_flows/functions.py
@@ -410,7 +410,7 @@ async def _run_with_trace():
function_response = altered_function_response

if tool.is_long_running:
# Allow long running function to return None to not provide function
# Allow long-running function to return None to not provide function
# response.
if not function_response:
return None
@@ -893,7 +893,7 @@ def find_matching_function_call(
)
for i in range(len(events) - 2, -1, -1):
event = events[i]
# looking for the system long running request euc function call
# looking for the system long-running request euc function call
function_calls = event.get_function_calls()
if not function_calls:
continue
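
A hedged paraphrase of the long-running rule in the first functions.py hunk above: a long-running tool may return a falsy value, in which case no function response should be emitted yet. The helper below is illustrative only, not the ADK implementation.

from typing import Any, Optional

def maybe_function_response(
    is_long_running: bool, function_response: Optional[dict[str, Any]]
) -> Optional[dict[str, Any]]:
    if is_long_running and not function_response:
        # The tool is still running; defer the response until it completes.
        return None
    return function_response
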
2 changes: 1 addition & 1 deletion src/google/adk/memory/vertex_ai_rag_memory_service.py
@@ -52,7 +52,7 @@ def __init__(
or ``{rag_corpus_id}``
similarity_top_k: The number of contexts to retrieve.
vector_distance_threshold: Only returns contexts with vector distance
smaller than the threshold..
smaller than the threshold.
"""
self._vertex_rag_store = types.VertexRagStore(
rag_resources=[
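
A hedged construction sketch based on the parameter descriptions above; the corpus resource name is a placeholder and the `rag_corpus` keyword is an assumption inferred from the docstring, not a verified signature.

from google.adk.memory.vertex_ai_rag_memory_service import VertexAiRagMemoryService

memory_service = VertexAiRagMemoryService(
    rag_corpus="projects/my-project/locations/us-central1/ragCorpora/1234",  # assumed kwarg
    similarity_top_k=5,             # number of contexts to retrieve
    vector_distance_threshold=0.7,  # only contexts closer than this distance are returned
)
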
2 changes: 1 addition & 1 deletion src/google/adk/models/google_llm.py
@@ -58,7 +58,7 @@


class _ResourceExhaustedError(ClientError):
"""Represents an resources exhausted error received from the Model."""
"""Represents a resources exhausted error received from the Model."""

def __init__(
self,
2 changes: 1 addition & 1 deletion src/google/adk/models/lite_llm.py
@@ -152,7 +152,7 @@
def _ensure_litellm_imported() -> None:
"""Imports LiteLLM with safe defaults.

LiteLLM defaults to DEV mode, which auto-loads a local `.env` at import time.
LiteLLM defaults to DEV mode, which autoloads a local `.env` at import time.
ADK should not implicitly load `.env` just because LiteLLM is installed.

Users can opt into LiteLLM's default behavior by setting LITELLM_MODE=DEV.
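
A short sketch of the opt-in mentioned above: set LITELLM_MODE=DEV before LiteLLM is imported if you do want its default `.env` auto-loading behavior (in practice it would usually be exported in the shell before ADK starts).

import os

os.environ.setdefault("LITELLM_MODE", "DEV")  # opt back into LiteLLM's DEV default
import litellm  # noqa: E402 -- imported only after the mode is set
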
2 changes: 1 addition & 1 deletion src/google/adk/plugins/base_plugin.py
@@ -68,7 +68,7 @@ class BasePlugin(ABC):
callback in the chain. For example, if a plugin modifies the tool input with
before_tool_callback, the modified tool input will be passed to the
before_tool_callback of the next plugin, and further passed to the agent
callbacks if not short circuited.
callbacks if not short-circuited.

To use a plugin, implement the desired callback methods and pass an instance
of your custom plugin class to the ADK Runner.
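
A plain-Python illustration of the chaining rule described above: each plugin's before-tool hook sees the (possibly modified) tool input produced by the previous one. This is conceptual only and does not use the real BasePlugin signature.

from typing import Any, Callable

BeforeToolHook = Callable[[dict[str, Any]], dict[str, Any]]

def run_before_tool_chain(hooks: list[BeforeToolHook], tool_args: dict[str, Any]) -> dict[str, Any]:
    for hook in hooks:
        tool_args = hook(tool_args)  # the next hook receives the modified input
    return tool_args

hooks: list[BeforeToolHook] = [
    lambda args: {**args, "units": "metric"},
    lambda args: {**args, "lang": "en"},
]
print(run_before_tool_chain(hooks, {"city": "Paris"}))
# {'city': 'Paris', 'units': 'metric', 'lang': 'en'}
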
2 changes: 1 addition & 1 deletion src/google/adk/plugins/logging_plugin.py
@@ -36,7 +36,7 @@
class LoggingPlugin(BasePlugin):
"""A plugin that logs important information at each callback point.

This plugin helps printing all critical events in the console. It is not a
This plugin helps print all critical events in the console. It is not a
replacement of existing logging in ADK. It rather helps terminal based
debugging by showing all logs in the console, and serves as a simple demo for
everyone to leverage when developing new plugins.
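
A hedged usage sketch, following the BasePlugin docstring's advice to hand plugin instances to the ADK Runner; the constructor arguments shown here (including the `plugins` keyword and the agent fields) are assumptions rather than verified signatures.

from google.adk.agents import LlmAgent
from google.adk.plugins.logging_plugin import LoggingPlugin
from google.adk.runners import InMemoryRunner

demo_agent = LlmAgent(name="demo_agent", model="gemini-2.0-flash")  # assumed constructor
runner = InMemoryRunner(
    agent=demo_agent,
    app_name="logging_demo",
    plugins=[LoggingPlugin()],  # assumed keyword; prints each callback to the console
)
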
12 changes: 6 additions & 6 deletions src/google/adk/runners.py
@@ -760,7 +760,7 @@ async def _exec_with_plugin(
else:
# Step 2: Otherwise continue with normal execution
# Note for live/bidi:
# the transcription may arrive later then the action(function call
# the transcription may arrive later than the action(function call
# event and thus function response event). In this case, the order of
# transcription and function call event will be wrong if we just
# append as it arrives. To address this, we should check if there is
@@ -770,7 +770,7 @@ async def _exec_with_plugin(
# identified by checking if the transcription event is partial. When
# the next transcription event is not partial, it means the previous
# transcription is finished. Then if there is any buffered function
# call event, we should append them after this finished(non-parital)
# call event, we should append them after this finished(non-partial)
# transcription event.
buffered_events: list[Event] = []
is_transcribing: bool = False
@@ -789,7 +789,7 @@ async def _exec_with_plugin(
buffered_events.append(event)
continue
# Note for live/bidi: for audio response, it's considered as
# non-paritla event(event.partial=None)
# non-partial event(event.partial=None)
# event.partial=False and event.partial=None are considered as
# non-partial event; event.partial=True is considered as partial
# event.
@@ -938,7 +938,7 @@ async def run_live(
* **Live Model Audio Events with Inline Data:** Events containing raw
audio `Blob` data(`inline_data`).
* **Live Model Audio Events with File Data:** Both input and ouput audio
data are aggregated into a audio file saved into artifacts. The
data are aggregated into an audio file saved into artifacts. The
reference to the file is saved in the event as `file_data`.
* **Usage Metadata:** Events containing token usage.
* **Transcription Events:** Both partial and non-partial transcription
@@ -948,7 +948,7 @@ async def run_live(

**Events Saved to the Session:**
* **Live Model Audio Events with File Data:** Both input and ouput audio
data are aggregated into a audio file saved into artifacts. The
data are aggregated into an audio file saved into artifacts. The
reference to the file is saved as event in the `file_data` to session
if RunConfig.save_live_model_audio_to_session is True.
* **Usage Metadata Events:** Saved to the session.
@@ -1099,7 +1099,7 @@ def _find_agent_to_run(
# If the last event is a function response, should send this response to
# the agent that returned the corresponding function call regardless the
# type of the agent. e.g. a remote a2a agent may surface a credential
# request as a special long running function tool call.
# request as a special long-running function tool call.
event = find_matching_function_call(session.events)
if event and event.author:
return root_agent.find_agent(event.author)
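
A self-contained sketch of the buffering rule described in the `_exec_with_plugin` comments above: while partial transcription events are streaming, hold back function-call events and flush them right after the finished (non-partial) transcription event. `Event` here is a toy stand-in, not the ADK event class, and partial=False/partial=None both count as non-partial, as the comments state.

from dataclasses import dataclass
from typing import Iterable, Iterator, Optional

@dataclass
class Event:
    kind: str                       # "transcription" or "function_call"
    partial: Optional[bool] = None  # True = partial; False/None = non-partial

def reorder_live_events(events: Iterable[Event]) -> Iterator[Event]:
    buffered: list[Event] = []
    is_transcribing = False
    for event in events:
        if event.kind == "transcription":
            is_transcribing = event.partial is True
            yield event
            if not is_transcribing and buffered:
                # Transcription finished: append the buffered function calls
                # after this non-partial transcription event.
                yield from buffered
                buffered.clear()
        elif is_transcribing:
            buffered.append(event)  # arrived mid-transcription; hold it back
        else:
            yield event
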
2 changes: 1 addition & 1 deletion src/google/adk/sessions/migration/migration_runner.py
@@ -49,7 +49,7 @@ def upgrade(source_db_url: str, dest_db_url: str):
LATEST_VERSION.

If multiple migration steps are required, intermediate results are stored in
temporary SQLite database files. This means a multi-step migration
temporary SQLite database files. This means a multistep migration
between other database types (e.g. PostgreSQL to PostgreSQL) will use
SQLite for intermediate steps.

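
A hedged usage sketch of the `upgrade()` helper whose signature appears in the hunk header above; the SQLAlchemy-style connection URLs are hypothetical placeholders.

from google.adk.sessions.migration.migration_runner import upgrade

# Migrate a session database to the latest schema version; intermediate steps
# of a multistep migration go through temporary SQLite files, per the docstring.
upgrade(
    source_db_url="sqlite:///./sessions_v1.db",
    dest_db_url="sqlite:///./sessions_latest.db",
)
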
2 changes: 1 addition & 1 deletion tests/unittests/models/test_litellm.py
@@ -10,7 +10,7 @@
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the Licens
# limitations under the License

import contextlib
import json
6 changes: 3 additions & 3 deletions tests/unittests/runners/test_runner_rewind.py
@@ -130,14 +130,14 @@ async def test_rewind_async_with_state_and_artifacts(self):
rewind_before_invocation_id="invocation2",
)

# 3. Verify state and artifacts are rewinded
# 3. Verify state and artifacts are rewound
session = await runner.session_service.get_session(
app_name=runner.app_name, user_id=user_id, session_id=session_id
)
# After rewind before invocation2, only event1 state delta should apply.
assert session.state["k1"] == "v1"
assert not session.state["k2"]
# f1 should be rewinded to v0
# f1 should be rewound to v0
assert await runner.artifact_service.load_artifact(
app_name=runner.app_name,
user_id=user_id,
@@ -226,7 +226,7 @@ async def test_rewind_async_not_first_invocation(self):
rewind_before_invocation_id="invocation3",
)

# 3. Verify state and artifacts are rewinded
# 3. Verify state and artifacts are rewound
session = await runner.session_service.get_session(
app_name=runner.app_name, user_id=user_id, session_id=session_id
)