
Commit 7626d8f

sararob authored and copybara-github committed
chore: Resolve all evals mypy errors
PiperOrigin-RevId: 862800470
1 parent d685d81 · commit 7626d8f

3 files changed, 13 additions & 13 deletions


vertexai/_genai/_evals_common.py

Lines changed: 2 additions & 2 deletions
@@ -71,7 +71,7 @@
 
 
 @contextlib.contextmanager
-def _temp_logger_level(logger_name: str, level: int):
+def _temp_logger_level(logger_name: str, level: int) -> None:
    """Temporarily sets the level of a logger."""
    logger_instance = logging.getLogger(logger_name)
    original_level = logger_instance.getEffectiveLevel()
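For reference, a self-contained sketch of the pattern this hunk annotates. This is a generic illustration, not the repository's code: generator-based context managers are conventionally annotated as returning Iterator[None] for type checkers, and everything past the visible context lines (the set/restore logic) is an assumption.

import contextlib
import logging
from typing import Iterator

@contextlib.contextmanager
def temp_logger_level(logger_name: str, level: int) -> Iterator[None]:
    """Temporarily sets the level of a logger, restoring it on exit."""
    logger_instance = logging.getLogger(logger_name)
    original_level = logger_instance.getEffectiveLevel()
    logger_instance.setLevel(level)  # assumed body; not shown in the diff
    try:
        yield
    finally:
        logger_instance.setLevel(original_level)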
@@ -95,7 +95,7 @@ def _get_api_client_with_location(
            location,
            api_client.location,
        )
-    return vertexai.Client(
+    return vertexai.Client(  # type: ignore[no-any-return]
        project=api_client.project,
        location=location,
        credentials=api_client._credentials,
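For context, mypy's [no-any-return] error (produced under --warn-return-any) fires when a function with a declared return type returns an expression mypy sees as Any. A minimal, self-contained repro with hypothetical names:

from typing import Any

def untyped_factory() -> Any:
    # Stands in for a constructor whose type reaches mypy as Any,
    # e.g. one re-exported through an untyped module boundary.
    return object()

def get_client() -> object:
    # mypy --warn-return-any reports:
    #   Returning Any from function declared to return "object"  [no-any-return]
    # The inline ignore suppresses only this error on this line:
    return untyped_factory()  # type: ignore[no-any-return]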

vertexai/_genai/_evals_metric_handlers.py

Lines changed: 4 additions & 4 deletions
@@ -617,15 +617,15 @@ def _build_pointwise_input(
 
    def _add_autorater_config(self, payload: dict[str, Any]) -> None:
        """Adds autorater config to the request payload if specified."""
-        autorater_config = {}
+        autorater_config: dict[str, Any] = {}
        if self.metric.judge_model:
            autorater_config["autorater_model"] = self.metric.judge_model
        if self.metric.judge_model_generation_config:
            autorater_config["generation_config"] = (
                self.metric.judge_model_generation_config
            )
        if self.metric.judge_model_sampling_count:
-            autorater_config["sampling_count"] = self.metric.judge_model_sampling_count  # type: ignore[assignment]
+            autorater_config["sampling_count"] = self.metric.judge_model_sampling_count
 
        if not autorater_config:
            return
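Both empty-dict annotations in this file (here and in _build_request_payload below) address the same mypy behavior: an empty dict literal creates a "partial" type that mypy resolves from the first insertion, after which values of other types raise [assignment], which is the ignore this hunk deletes. A minimal repro with hypothetical names:

from typing import Any

def build_config() -> dict[str, Any]:
    config = {}
    config["autorater_model"] = "some-model"  # resolves config as dict[str, str]
    config["sampling_count"] = 4
    # mypy: Incompatible types in assignment (expression has type "int",
    #       target has type "str")  [assignment]

    # Annotating the empty literal up front accepts heterogeneous values:
    typed_config: dict[str, Any] = {}
    typed_config["autorater_model"] = "some-model"
    typed_config["sampling_count"] = 4
    return typed_config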
@@ -989,11 +989,11 @@ def _build_request_payload(
            agent_data=PredefinedMetricHandler._eval_case_to_agent_data(eval_case),
        )
 
-        request_payload = {
+        request_payload: dict[str, Any] = {
            "instance": instance_payload,
        }
 
-        autorater_config = {}
+        autorater_config: dict[str, Any] = {}
        if self.metric.judge_model:
            autorater_config["autorater_model"] = self.metric.judge_model
        if self.metric.judge_model_generation_config:
vertexai/_genai/evals.py

Lines changed: 7 additions & 7 deletions
@@ -36,7 +36,7 @@
 try:
    from google.adk.agents import LlmAgent
 except ImportError:
-    LlmAgent = None  # type: ignore[assignment]
+    LlmAgent = None
 
 
 logger = logging.getLogger("vertexai_genai.evals")
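The try/except import above is the usual optional-dependency pattern. A hedged sketch of how such a sentinel is typically consumed; the guard function below is hypothetical and not part of this commit:

try:
    from google.adk.agents import LlmAgent  # optional extra; may be absent
except ImportError:
    LlmAgent = None  # sentinel: feature checks compare against None

def _check_adk_available() -> None:
    # Hypothetical guard illustrating the sentinel check.
    if LlmAgent is None:
        raise ImportError("google-adk must be installed to use agent evals")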
@@ -1216,10 +1216,10 @@ def evaluate(
            types.EvaluationDatasetOrDict,
            list[types.EvaluationDatasetOrDict],
        ],
-        metrics: list[types.MetricOrDict] = None,
+        metrics: Optional[list[types.MetricOrDict]] = None,
        location: Optional[str] = None,
        config: Optional[types.EvaluateMethodConfigOrDict] = None,
-        **kwargs,
+        **kwargs: Any,
    ) -> types.EvaluationResult:
        """Evaluates candidate responses in the provided dataset(s) using the specified metrics.
 
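Two separate strict-mode rules plausibly motivate this hunk: no_implicit_optional has been on by default since mypy 0.990, so a None default requires an explicit Optional[...], and --disallow-untyped-defs treats a bare **kwargs as an untyped argument. A minimal repro with a hypothetical standalone signature:

from typing import Any, Optional

# With a bare "metrics: list[str] = None", mypy reports:
#   Incompatible default for argument "metrics" (default has type "None",
#   argument has type "list[str]")  [assignment]
# and an unannotated "**kwargs" trips --disallow-untyped-defs.

def evaluate(metrics: Optional[list[str]] = None, **kwargs: Any) -> None:
    # "**kwargs: Any" types each keyword value as Any, which satisfies
    # --disallow-untyped-defs without constraining callers.
    resolved = metrics if metrics is not None else []
    print(len(resolved), len(kwargs))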
@@ -1656,7 +1656,7 @@ def create_evaluation_run(
        evaluation_config = types.EvaluationRunConfig(
            output_config=output_config, metrics=resolved_metrics
        )
-        if agent_info:
+        if agent_info and agent_info.name is not None:
            inference_configs = {}
            inference_configs[agent_info.name] = types.EvaluationRunInferenceConfig(
                agent_config=types.EvaluationRunAgentConfig(
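The extra "agent_info.name is not None" check is a standard mypy narrowing move: mypy narrows member expressions after an "is not None" test, so an Optional[str] attribute becomes a valid dict key inside the guarded block. A sketch with hypothetical types:

from typing import Optional

class AgentInfo:
    name: Optional[str] = None

def build_inference_configs(agent_info: Optional[AgentInfo]) -> dict[str, str]:
    inference_configs: dict[str, str] = {}
    if agent_info and agent_info.name is not None:
        # Within this block mypy narrows agent_info.name to str,
        # so indexing the dict with it type-checks.
        inference_configs[agent_info.name] = "configured"
    return inference_configs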
@@ -2533,7 +2533,7 @@ async def create_evaluation_run(
        if not name:
            name = f"evaluation_run_{uuid.uuid4()}"
 
-        result = await self._create_evaluation_run(  # type: ignore[no-any-return]
+        result = await self._create_evaluation_run(
            name=name,
            display_name=display_name or name,
            data_source=dataset,
@@ -2645,7 +2645,7 @@ async def create_evaluation_item(
        Returns:
            The evaluation item.
        """
-        result = await self._create_evaluation_item(  # type: ignore[no-any-return]
+        result = await self._create_evaluation_item(
            evaluation_item_type=evaluation_item_type,
            gcs_uri=gcs_uri,
            display_name=display_name,
@@ -2676,7 +2676,7 @@ async def create_evaluation_set(
        Returns:
            The evaluation set.
        """
-        result = await self._create_evaluation_set(  # type: ignore[no-any-return]
+        result = await self._create_evaluation_set(
            evaluation_items=evaluation_items,
            display_name=display_name,
            config=config,