diff --git a/doc/code/converters/0_converters.ipynb b/doc/code/converters/0_converters.ipynb index 512cf42f12..0343206a64 100644 --- a/doc/code/converters/0_converters.ipynb +++ b/doc/code/converters/0_converters.ipynb @@ -615,7 +615,7 @@ "\n", "# Display all rows\n", "pd.set_option(\"display.max_rows\", None)\n", - "df" + "print(df)" ] }, { diff --git a/doc/code/converters/0_converters.py b/doc/code/converters/0_converters.py index 5731a39826..8e9dd4ce4e 100644 --- a/doc/code/converters/0_converters.py +++ b/doc/code/converters/0_converters.py @@ -50,7 +50,7 @@ # Display all rows pd.set_option("display.max_rows", None) -df +print(df) # %% [markdown] # ## Converter Categories diff --git a/doc/code/converters/4_video_converters.ipynb b/doc/code/converters/4_video_converters.ipynb index 2681d0c8eb..e0bb7b614b 100644 --- a/doc/code/converters/4_video_converters.ipynb +++ b/doc/code/converters/4_video_converters.ipynb @@ -69,7 +69,7 @@ "\n", "video = AddImageVideoConverter(video_path=input_video)\n", "converted_vid = await video.convert_async(prompt=input_image, input_type=\"image_path\") # type: ignore\n", - "converted_vid" + "print(converted_vid)" ] } ], diff --git a/doc/code/converters/4_video_converters.py b/doc/code/converters/4_video_converters.py index ca2ace8613..c537193c1a 100644 --- a/doc/code/converters/4_video_converters.py +++ b/doc/code/converters/4_video_converters.py @@ -41,4 +41,4 @@ video = AddImageVideoConverter(video_path=input_video) converted_vid = await video.convert_async(prompt=input_image, input_type="image_path") # type: ignore -converted_vid +print(converted_vid) diff --git a/doc/code/memory/embeddings.ipynb b/doc/code/memory/embeddings.ipynb index fbdc1d68ed..519389fd38 100644 --- a/doc/code/memory/embeddings.ipynb +++ b/doc/code/memory/embeddings.ipynb @@ -109,7 +109,7 @@ "from pyrit.common.path import DB_DATA_PATH\n", "\n", "saved_embedding_path = embedding_response.save_to_file(directory_path=DB_DATA_PATH)\n", - "saved_embedding_path" + 
"print(saved_embedding_path)" ] }, { @@ -143,7 +143,7 @@ "from pyrit.common.path import DB_DATA_PATH\n", "\n", "saved_embedding_path = embedding_response.save_to_file(directory_path=DB_DATA_PATH)\n", - "saved_embedding_path" + "print(saved_embedding_path)" ] } ], diff --git a/doc/code/memory/embeddings.py b/doc/code/memory/embeddings.py index 27f60a5c64..74a74acdc9 100644 --- a/doc/code/memory/embeddings.py +++ b/doc/code/memory/embeddings.py @@ -49,7 +49,7 @@ from pyrit.common.path import DB_DATA_PATH saved_embedding_path = embedding_response.save_to_file(directory_path=DB_DATA_PATH) -saved_embedding_path +print(saved_embedding_path) # %% [markdown] # To load an embedding from disk @@ -59,4 +59,4 @@ from pyrit.common.path import DB_DATA_PATH saved_embedding_path = embedding_response.save_to_file(directory_path=DB_DATA_PATH) -saved_embedding_path +print(saved_embedding_path) diff --git a/doc/code/scoring/8_scorer_metrics.ipynb b/doc/code/scoring/8_scorer_metrics.ipynb index a82bf84ec1..c8d4d9f3c7 100644 --- a/doc/code/scoring/8_scorer_metrics.ipynb +++ b/doc/code/scoring/8_scorer_metrics.ipynb @@ -388,7 +388,7 @@ "\n", "print(\"Top 5 configurations by F1 Score:\")\n", "print(\"-\" * 80)\n", - "for i, entry in enumerate(sorted_by_f1[:5], 1):\n", + "for _i, entry in enumerate(sorted_by_f1[:5], 1):\n", " printer = ConsoleScorerPrinter()\n", " printer.print_objective_scorer(scorer_identifier=entry.scorer_identifier)\n", "\n", @@ -468,7 +468,7 @@ "\n", "print(\"Top configurations by Mean Absolute Error:\")\n", "print(\"-\" * 80)\n", - "for i, e in enumerate(sorted_by_mae[:5], 1):\n", + "for _i, e in enumerate(sorted_by_mae[:5], 1):\n", " printer = ConsoleScorerPrinter()\n", " printer.print_harm_scorer(scorer_identifier=e.scorer_identifier, harm_category=\"violence\")" ] diff --git a/doc/code/scoring/8_scorer_metrics.py b/doc/code/scoring/8_scorer_metrics.py index 2a1443da32..fcd09a65c1 100644 --- a/doc/code/scoring/8_scorer_metrics.py +++ 
b/doc/code/scoring/8_scorer_metrics.py @@ -195,7 +195,7 @@ print("Top 5 configurations by F1 Score:") print("-" * 80) -for i, entry in enumerate(sorted_by_f1[:5], 1): +for _i, entry in enumerate(sorted_by_f1[:5], 1): printer = ConsoleScorerPrinter() printer.print_objective_scorer(scorer_identifier=entry.scorer_identifier) @@ -227,7 +227,7 @@ print("Top configurations by Mean Absolute Error:") print("-" * 80) -for i, e in enumerate(sorted_by_mae[:5], 1): +for _i, e in enumerate(sorted_by_mae[:5], 1): printer = ConsoleScorerPrinter() printer.print_harm_scorer(scorer_identifier=e.scorer_identifier, harm_category="violence") diff --git a/doc/cookbooks/4_testing_bias.ipynb b/doc/cookbooks/4_testing_bias.ipynb index d99c05652e..ed9d31529a 100644 --- a/doc/cookbooks/4_testing_bias.ipynb +++ b/doc/cookbooks/4_testing_bias.ipynb @@ -323,7 +323,7 @@ "for job, gender in itertools.product(jobs[:3], genders): # for demonstration purposes we just use first 3 jobs\n", " question = f\"Is the {job} job better performed by {gender}?\"\n", " answers[question] = []\n", - " for i in range(10):\n", + " for _i in range(10):\n", " result = await attack.execute_async(objective=question, prepended_conversation=[system_prompt]) # type: ignore\n", " answer = memory.get_conversation(conversation_id=result.conversation_id)[2].get_value()\n", " if answer.startswith(\"YES\"):\n", @@ -626,7 +626,7 @@ " ],\n", " )\n", " answers[question] = []\n", - " for i in range(5):\n", + " for _i in range(5):\n", " result = await benchmark.execute_async(question_answering_entry=question_answering_entry) # type: ignore\n", " answers[question].append(str(result.outcome))\n", "\n", diff --git a/doc/cookbooks/4_testing_bias.py b/doc/cookbooks/4_testing_bias.py index 2186801e88..f4392d2309 100644 --- a/doc/cookbooks/4_testing_bias.py +++ b/doc/cookbooks/4_testing_bias.py @@ -127,7 +127,7 @@ for job, gender in itertools.product(jobs[:3], genders): # for demonstration purposes we just use first 3 jobs question = f"Is 
the {job} job better performed by {gender}?" answers[question] = [] - for i in range(10): + for _i in range(10): result = await attack.execute_async(objective=question, prepended_conversation=[system_prompt]) # type: ignore answer = memory.get_conversation(conversation_id=result.conversation_id)[2].get_value() if answer.startswith("YES"): @@ -172,7 +172,7 @@ ], ) answers[question] = [] - for i in range(5): + for _i in range(5): result = await benchmark.execute_async(question_answering_entry=question_answering_entry) # type: ignore answers[question].append(str(result.outcome)) diff --git a/doc/generate_docs/pct_to_ipynb.py b/doc/generate_docs/pct_to_ipynb.py index b6bd1fbf07..9ced043d36 100644 --- a/doc/generate_docs/pct_to_ipynb.py +++ b/doc/generate_docs/pct_to_ipynb.py @@ -87,7 +87,7 @@ def find_files(directory, file_extension): dir_path = os.path.join(directory, included_dir) if not os.path.exists(dir_path): continue - for root, dirs, files in os.walk(dir_path): + for root, _dirs, files in os.walk(dir_path): for file in files: if file.endswith("_helpers.py"): continue diff --git a/pyproject.toml b/pyproject.toml index 9e9d492040..13b2f3ce2c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -252,6 +252,7 @@ fixable = [ "YTT", ] select = [ + "B", # https://docs.astral.sh/ruff/rules/#flake8-bugbear-b "C4", # https://docs.astral.sh/ruff/rules/#flake8-comprehensions-c4 "CPY001", # missing-copyright-notice "D", # https://docs.astral.sh/ruff/rules/#pydocstyle-d @@ -265,6 +266,7 @@ select = [ "W", # https://docs.astral.sh/ruff/rules/#pycodestyle-w ] ignore = [ + "B903", # class-as-data-structure (test helper classes use @apply_defaults pattern) "D100", # Missing docstring in public module "D200", # One-line docstring should fit on one line "D205", # 1 blank line required between summary line and description @@ -297,10 +299,12 @@ notice-rgx = "Copyright \\(c\\) Microsoft Corporation\\.\\s*\\n.*Licensed under # Temporary ignores for pyrit/ subdirectories until issue 
#1176 # https://github.com/Azure/PyRIT/issues/1176 is fully resolved # TODO: Remove these ignores once the issues are fixed -"pyrit/{auxiliary_attacks,ui}/**/*.py" = ["D101", "D102", "D103", "D104", "D105", "D106", "D107", "D401", "D404", "D417", "D418", "DOC102", "DOC201", "DOC202", "DOC402", "DOC501", "SIM101", "SIM108"] +"pyrit/{auxiliary_attacks,ui}/**/*.py" = ["B905", "D101", "D102", "D103", "D104", "D105", "D106", "D107", "D401", "D404", "D417", "D418", "DOC102", "DOC201", "DOC202", "DOC402", "DOC501", "SIM101", "SIM108"] # Backend API routes raise HTTPException handled by FastAPI, not true exceptions -"pyrit/backend/**/*.py" = ["DOC501"] +"pyrit/backend/**/*.py" = ["DOC501", "B008"] "pyrit/__init__.py" = ["D104"] +# Allow broad pytest.raises(Exception) in tests +"tests/**/*.py" = ["B017"] [tool.ruff.lint.pydocstyle] convention = "google" diff --git a/pyrit/auth/copilot_authenticator.py b/pyrit/auth/copilot_authenticator.py index 865768ff30..4e7358aa64 100644 --- a/pyrit/auth/copilot_authenticator.py +++ b/pyrit/auth/copilot_authenticator.py @@ -304,7 +304,7 @@ async def _fetch_access_token_with_playwright(self) -> Optional[str]: raise RuntimeError( "Playwright is not installed. 
Please install it with: " "'pip install playwright && playwright install chromium'" - ) + ) from None # On Windows, when using SelectorEventLoop (common in Jupyter), we need to run # Playwright in a separate thread with ProactorEventLoop to support subprocesses diff --git a/pyrit/auth/manual_copilot_authenticator.py b/pyrit/auth/manual_copilot_authenticator.py index 8dca7293df..e23848cd4b 100644 --- a/pyrit/auth/manual_copilot_authenticator.py +++ b/pyrit/auth/manual_copilot_authenticator.py @@ -63,7 +63,7 @@ def __init__(self, *, access_token: Optional[str] = None) -> None: resolved_token, algorithms=["RS256"], options={"verify_signature": False} ) except jwt.exceptions.DecodeError as e: - raise ValueError(f"Failed to decode access_token as JWT: {e}") + raise ValueError(f"Failed to decode access_token as JWT: {e}") from e required_claims = ["tid", "oid"] missing_claims = [claim for claim in required_claims if claim not in self._claims] diff --git a/pyrit/backend/routes/attacks.py b/pyrit/backend/routes/attacks.py index b32fb30978..ed6fc4c029 100644 --- a/pyrit/backend/routes/attacks.py +++ b/pyrit/backend/routes/attacks.py @@ -153,7 +153,7 @@ async def create_attack(request: CreateAttackRequest) -> CreateAttackResponse: raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail=str(e), - ) + ) from e @router.get( @@ -280,13 +280,13 @@ async def add_message( raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail=error_msg, - ) + ) from e raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail=error_msg, - ) + ) from e except Exception as e: raise HTTPException( status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=f"Failed to add message: {str(e)}", - ) + ) from e diff --git a/pyrit/backend/routes/converters.py b/pyrit/backend/routes/converters.py index f4354ba50d..095b6ef440 100644 --- a/pyrit/backend/routes/converters.py +++ b/pyrit/backend/routes/converters.py @@ -67,12 +67,12 @@ async def create_converter(request: 
CreateConverterRequest) -> CreateConverterRe raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail=str(e), - ) + ) from e except Exception as e: raise HTTPException( status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=f"Failed to create converter: {str(e)}", - ) + ) from e @router.get( @@ -126,9 +126,9 @@ async def preview_conversion(request: ConverterPreviewRequest) -> ConverterPrevi raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail=str(e), - ) + ) from e except Exception as e: raise HTTPException( status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=f"Converter preview failed: {str(e)}", - ) + ) from e diff --git a/pyrit/backend/routes/targets.py b/pyrit/backend/routes/targets.py index 437d8212ff..f17f4f4f68 100644 --- a/pyrit/backend/routes/targets.py +++ b/pyrit/backend/routes/targets.py @@ -74,12 +74,12 @@ async def create_target(request: CreateTargetRequest) -> TargetInstance: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail=str(e), - ) + ) from e except Exception as e: raise HTTPException( status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=f"Failed to create target: {str(e)}", - ) + ) from e @router.get( diff --git a/pyrit/common/notebook_utils.py b/pyrit/common/notebook_utils.py index 0a098a1a7b..8b8c98a117 100644 --- a/pyrit/common/notebook_utils.py +++ b/pyrit/common/notebook_utils.py @@ -13,7 +13,7 @@ def is_in_ipython_session() -> bool: bool: True if the code is running in an IPython session, False otherwise. 
""" try: - __IPYTHON__ # type: ignore + __IPYTHON__ # type: ignore # noqa: B018 return True except NameError: return False diff --git a/pyrit/common/yaml_loadable.py b/pyrit/common/yaml_loadable.py index c7f4efa739..c3c669ed75 100644 --- a/pyrit/common/yaml_loadable.py +++ b/pyrit/common/yaml_loadable.py @@ -12,7 +12,7 @@ T = TypeVar("T", bound="YamlLoadable") -class YamlLoadable(abc.ABC): +class YamlLoadable(abc.ABC): # noqa: B024 """ Abstract base class for objects that can be loaded from YAML files. """ @@ -36,10 +36,10 @@ def from_yaml_file(cls: type[T], file: Union[Path | str]) -> T: try: yaml_data = yaml.safe_load(file.read_text("utf-8")) except yaml.YAMLError as exc: - raise ValueError(f"Invalid YAML file '{file}': {exc}") + raise ValueError(f"Invalid YAML file '{file}': {exc}") from exc # If this class provides a from_dict factory, use it; # otherwise, just instantiate directly with **yaml_data - if hasattr(cls, "from_dict") and callable(getattr(cls, "from_dict")): + if hasattr(cls, "from_dict") and callable(getattr(cls, "from_dict")): # noqa: B009 return cls.from_dict(yaml_data) # type: ignore return cls(**yaml_data) diff --git a/pyrit/datasets/seed_datasets/remote/jbb_behaviors_dataset.py b/pyrit/datasets/seed_datasets/remote/jbb_behaviors_dataset.py index 5b61974ef5..a622a4a018 100644 --- a/pyrit/datasets/seed_datasets/remote/jbb_behaviors_dataset.py +++ b/pyrit/datasets/seed_datasets/remote/jbb_behaviors_dataset.py @@ -121,7 +121,7 @@ async def fetch_dataset(self, *, cache: bool = True) -> SeedDataset: except Exception as e: logger.error(f"Failed to load JBB-Behaviors dataset: {str(e)}") - raise Exception(f"Error loading JBB-Behaviors dataset: {str(e)}") + raise Exception(f"Error loading JBB-Behaviors dataset: {str(e)}") from e def _map_jbb_category_to_harm_category(self, jbb_category: str) -> list[str]: """ diff --git a/pyrit/datasets/seed_datasets/remote/vlsu_multimodal_dataset.py b/pyrit/datasets/seed_datasets/remote/vlsu_multimodal_dataset.py index 
25f8291ee9..59bc8406b4 100644 --- a/pyrit/datasets/seed_datasets/remote/vlsu_multimodal_dataset.py +++ b/pyrit/datasets/seed_datasets/remote/vlsu_multimodal_dataset.py @@ -56,7 +56,7 @@ def __init__( source: str = "https://raw.githubusercontent.com/apple/ml-vlsu/main/data/VLSU.csv", source_type: Literal["public_url", "file"] = "public_url", categories: Optional[list[VLSUCategory]] = None, - unsafe_grades: Optional[list[str]] = ["unsafe", "borderline"], + unsafe_grades: Optional[list[str]] = None, max_examples: Optional[int] = None, ): """ @@ -77,6 +77,8 @@ def __init__( Raises: ValueError: If any of the specified categories are invalid. """ + if unsafe_grades is None: + unsafe_grades = ["unsafe", "borderline"] self.source = source self.source_type: Literal["public_url", "file"] = source_type self.categories = categories diff --git a/pyrit/datasets/seed_datasets/seed_dataset_provider.py b/pyrit/datasets/seed_datasets/seed_dataset_provider.py index 7388164078..56b61b3996 100644 --- a/pyrit/datasets/seed_datasets/seed_dataset_provider.py +++ b/pyrit/datasets/seed_datasets/seed_dataset_provider.py @@ -99,7 +99,7 @@ def get_all_dataset_names(cls) -> list[str]: provider = provider_class() dataset_names.add(provider.dataset_name) except Exception as e: - raise ValueError(f"Could not get dataset name from {provider_class.__name__}: {e}") + raise ValueError(f"Could not get dataset name from {provider_class.__name__}: {e}") from e return sorted(dataset_names) @classmethod diff --git a/pyrit/executor/attack/core/attack_executor.py b/pyrit/executor/attack/core/attack_executor.py index 06fa2e6823..a7c99d1ddd 100644 --- a/pyrit/executor/attack/core/attack_executor.py +++ b/pyrit/executor/attack/core/attack_executor.py @@ -332,7 +332,7 @@ def _process_execution_results( completed: list[AttackStrategyResultT] = [] incomplete: list[tuple[str, BaseException]] = [] - for objective, result in zip(objectives, results_or_exceptions): + for objective, result in zip(objectives, 
results_or_exceptions, strict=False): if isinstance(result, BaseException): incomplete.append((objective, result)) else: diff --git a/pyrit/executor/attack/multi_turn/tree_of_attacks.py b/pyrit/executor/attack/multi_turn/tree_of_attacks.py index 7414bcbdc6..ccc98c2408 100644 --- a/pyrit/executor/attack/multi_turn/tree_of_attacks.py +++ b/pyrit/executor/attack/multi_turn/tree_of_attacks.py @@ -1133,13 +1133,17 @@ def _parse_red_teaming_response(self, red_teaming_response: str) -> str: red_teaming_response_dict = json.loads(red_teaming_response) except json.JSONDecodeError: logger.error(f"The response from the red teaming chat is not in JSON format: {red_teaming_response}") - raise InvalidJsonException(message="The response from the red teaming chat is not in JSON format.") + raise InvalidJsonException( + message="The response from the red teaming chat is not in JSON format." + ) from None try: return cast(str, red_teaming_response_dict["prompt"]) except KeyError: logger.error(f"The response from the red teaming chat does not contain a prompt: {red_teaming_response}") - raise InvalidJsonException(message="The response from the red teaming chat does not contain a prompt.") + raise InvalidJsonException( + message="The response from the red teaming chat does not contain a prompt." 
+ ) from None def __str__(self) -> str: """ diff --git a/pyrit/executor/attack/single_turn/context_compliance.py b/pyrit/executor/attack/single_turn/context_compliance.py index 1c9d769eb4..d03ab2a41f 100644 --- a/pyrit/executor/attack/single_turn/context_compliance.py +++ b/pyrit/executor/attack/single_turn/context_compliance.py @@ -119,7 +119,7 @@ def _load_context_description_instructions(self, *, instructions_path: Path) -> try: context_description_instructions = SeedDataset.from_yaml_file(instructions_path) except Exception as e: - raise ValueError(f"Failed to load context description instructions from {instructions_path}: {e}") + raise ValueError(f"Failed to load context description instructions from {instructions_path}: {e}") from e if len(context_description_instructions.prompts) < 3: raise ValueError( diff --git a/pyrit/executor/core/strategy.py b/pyrit/executor/core/strategy.py index 383465aacf..df3786ab19 100644 --- a/pyrit/executor/core/strategy.py +++ b/pyrit/executor/core/strategy.py @@ -24,7 +24,7 @@ @dataclass -class StrategyContext(ABC): +class StrategyContext(ABC): # noqa: B024 """Base class for all strategy contexts.""" def duplicate(self: StrategyContextT) -> StrategyContextT: diff --git a/pyrit/executor/promptgen/fuzzer/fuzzer_converter_base.py b/pyrit/executor/promptgen/fuzzer/fuzzer_converter_base.py index a0b759f7de..73d0e6d3dc 100644 --- a/pyrit/executor/promptgen/fuzzer/fuzzer_converter_base.py +++ b/pyrit/executor/promptgen/fuzzer/fuzzer_converter_base.py @@ -136,7 +136,7 @@ async def send_prompt_async(self, request: Message) -> str: return str(parsed_response["output"]) except json.JSONDecodeError: - raise InvalidJsonException(message=f"Invalid JSON encountered: {response_msg}") + raise InvalidJsonException(message=f"Invalid JSON encountered: {response_msg}") from None def input_supported(self, input_type: PromptDataType) -> bool: """ diff --git a/pyrit/memory/memory_embedding.py b/pyrit/memory/memory_embedding.py index 
5c85f30c74..6ff724db22 100644 --- a/pyrit/memory/memory_embedding.py +++ b/pyrit/memory/memory_embedding.py @@ -82,5 +82,7 @@ def default_memory_embedding_factory(embedding_model: Optional[EmbeddingSupport] try: model = OpenAITextEmbedding() return MemoryEmbedding(embedding_model=model) - except ValueError: - raise ValueError("No embedding model was provided and no OpenAI embedding model was found in the environment.") + except ValueError as e: + raise ValueError( + "No embedding model was provided and no OpenAI embedding model was found in the environment." + ) from e diff --git a/pyrit/models/harm_definition.py b/pyrit/models/harm_definition.py index 53f5f4012d..d8c836e977 100644 --- a/pyrit/models/harm_definition.py +++ b/pyrit/models/harm_definition.py @@ -140,7 +140,7 @@ def from_yaml(cls, harm_definition_path: Union[str, Path]) -> "HarmDefinition": with open(resolved_path, encoding="utf-8") as f: data = yaml.safe_load(f) except yaml.YAMLError as e: - raise ValueError(f"Invalid YAML in harm definition file {resolved_path}: {e}") + raise ValueError(f"Invalid YAML in harm definition file {resolved_path}: {e}") from e if not isinstance(data, dict): raise ValueError(f"Harm definition file {resolved_path} must contain a YAML mapping/dictionary.") diff --git a/pyrit/models/json_response_config.py b/pyrit/models/json_response_config.py index ad28b1677a..9ddb914eda 100644 --- a/pyrit/models/json_response_config.py +++ b/pyrit/models/json_response_config.py @@ -46,8 +46,8 @@ def from_metadata(cls, *, metadata: Optional[dict[str, Any]]) -> _JsonResponseCo if isinstance(schema_val, str): try: schema = json.loads(schema_val) if schema_val else None - except json.JSONDecodeError: - raise ValueError(f"Invalid JSON schema provided: {schema_val}") + except json.JSONDecodeError as e: + raise ValueError(f"Invalid JSON schema provided: {schema_val}") from e else: schema = schema_val diff --git a/pyrit/models/question_answering.py b/pyrit/models/question_answering.py index 
1d526a2dad..c468461090 100644 --- a/pyrit/models/question_answering.py +++ b/pyrit/models/question_answering.py @@ -44,11 +44,11 @@ def get_correct_answer_text(self) -> str: try: # Match using the explicit choice.index (not enumerate position) so non-sequential indices are supported return next(choice for choice in self.choices if str(choice.index) == str(correct_answer_index)).text - except StopIteration: + except StopIteration as e: raise ValueError( f"No matching choice found for correct_answer '{correct_answer_index}'. " f"Available choices are: {[f'{i}: {c.text}' for i, c in enumerate(self.choices)]}" - ) + ) from e def __hash__(self) -> int: """ diff --git a/pyrit/models/score.py b/pyrit/models/score.py index 6e1b2ab79b..fa01d5432c 100644 --- a/pyrit/models/score.py +++ b/pyrit/models/score.py @@ -152,8 +152,8 @@ def validate(self, scorer_type: str, score_value: str) -> None: score = float(score_value) if not (0 <= score <= 1): raise ValueError(f"Float scale scorers must have a score value between 0 and 1. Got {score_value}") - except ValueError: - raise ValueError(f"Float scale scorers require a numeric score value. Got {score_value}") + except ValueError as e: + raise ValueError(f"Float scale scorers require a numeric score value. 
Got {score_value}") from e def to_dict(self) -> dict[str, Any]: """ diff --git a/pyrit/models/strategy_result.py b/pyrit/models/strategy_result.py index 8de784e863..38fac2af04 100644 --- a/pyrit/models/strategy_result.py +++ b/pyrit/models/strategy_result.py @@ -12,7 +12,7 @@ @dataclass -class StrategyResult(ABC): +class StrategyResult(ABC): # noqa: B024 """Base class for all strategy results.""" def duplicate(self: StrategyResultT) -> StrategyResultT: diff --git a/pyrit/prompt_converter/denylist_converter.py b/pyrit/prompt_converter/denylist_converter.py index 55609625ad..a9672e3718 100644 --- a/pyrit/prompt_converter/denylist_converter.py +++ b/pyrit/prompt_converter/denylist_converter.py @@ -28,7 +28,7 @@ def __init__( *, converter_target: PromptChatTarget = REQUIRED_VALUE, # type: ignore[assignment] system_prompt_template: Optional[SeedPrompt] = None, - denylist: list[str] = [], + denylist: list[str] = None, ): """ Initialize the converter with a target, an optional system prompt template, and a denylist. @@ -41,6 +41,8 @@ def __init__( denylist (list[str]): A list of words or phrases that should be replaced in the prompt. 
""" # set to default strategy if not provided + if denylist is None: + denylist = [] system_prompt_template = ( system_prompt_template if system_prompt_template diff --git a/pyrit/prompt_converter/image_compression_converter.py b/pyrit/prompt_converter/image_compression_converter.py index 9c2e8179c0..a0d25d9390 100644 --- a/pyrit/prompt_converter/image_compression_converter.py +++ b/pyrit/prompt_converter/image_compression_converter.py @@ -263,7 +263,7 @@ async def _read_image_from_url(self, url: str) -> bytes: response.raise_for_status() return await response.read() except aiohttp.ClientError as e: - raise RuntimeError(f"Failed to download content from URL {url}: {str(e)}") + raise RuntimeError(f"Failed to download content from URL {url}: {str(e)}") from e async def convert_async(self, *, prompt: str, input_type: PromptDataType = "image_path") -> ConverterResult: """ diff --git a/pyrit/prompt_converter/pdf_converter.py b/pyrit/prompt_converter/pdf_converter.py index 76ed363d2d..bd85b019a4 100644 --- a/pyrit/prompt_converter/pdf_converter.py +++ b/pyrit/prompt_converter/pdf_converter.py @@ -195,7 +195,7 @@ def _prepare_content(self, prompt: str) -> str: except (ValueError, KeyError) as e: logger.error(f"Error rendering prompt: {e}") - raise ValueError(f"Failed to render the prompt: {e}") + raise ValueError(f"Failed to render the prompt: {e}") from e # If no template is provided, return the raw prompt as content if isinstance(prompt, str): diff --git a/pyrit/prompt_converter/persuasion_converter.py b/pyrit/prompt_converter/persuasion_converter.py index 9cc7bd5f1f..152d0150fa 100644 --- a/pyrit/prompt_converter/persuasion_converter.py +++ b/pyrit/prompt_converter/persuasion_converter.py @@ -76,7 +76,9 @@ def __init__( pathlib.Path(CONVERTER_SEED_PROMPT_PATH) / "persuasion" / f"{persuasion_technique}.yaml" ) except FileNotFoundError: - raise ValueError(f"Persuasion technique '{persuasion_technique}' does not exist or is not supported.") + raise ValueError( + 
f"Persuasion technique '{persuasion_technique}' does not exist or is not supported." + ) from None self.system_prompt = str(prompt_template.value) self._persuasion_technique = persuasion_technique @@ -167,4 +169,4 @@ async def send_persuasion_prompt_async(self, request: Message) -> str: return str(parsed_response["mutated_text"]) except json.JSONDecodeError: - raise InvalidJsonException(message=f"Invalid JSON encountered: {response_msg}") + raise InvalidJsonException(message=f"Invalid JSON encountered: {response_msg}") from None diff --git a/pyrit/prompt_converter/prompt_converter.py b/pyrit/prompt_converter/prompt_converter.py index 8f2c7002ad..141076e701 100644 --- a/pyrit/prompt_converter/prompt_converter.py +++ b/pyrit/prompt_converter/prompt_converter.py @@ -156,7 +156,7 @@ async def convert_tokens_async( tasks = [self._replace_text_match(match) for match in matches] converted_parts = await asyncio.gather(*tasks) - for original, converted in zip(matches, converted_parts): + for original, converted in zip(matches, converted_parts, strict=False): prompt = prompt.replace(f"{start_token}{original}{end_token}", converted.output_text, 1) return ConverterResult(output_text=prompt, output_type="text") diff --git a/pyrit/prompt_converter/template_segment_converter.py b/pyrit/prompt_converter/template_segment_converter.py index def0ad3a3d..8520436471 100644 --- a/pyrit/prompt_converter/template_segment_converter.py +++ b/pyrit/prompt_converter/template_segment_converter.py @@ -69,7 +69,7 @@ def __init__( raise ValueError( f"Error validating template parameters: {str(e)}. 
" f"Template parameters: {self.prompt_template.parameters}" - ) + ) from e def _build_identifier(self) -> ComponentIdentifier: """ @@ -107,7 +107,7 @@ async def convert_async(self, *, prompt: str, input_type: PromptDataType = "text segments = self._split_prompt_into_segments(prompt) filled_template = self.prompt_template.render_template_value( - **dict(zip(self.prompt_template.parameters, segments)) + **dict(zip(self.prompt_template.parameters, segments, strict=False)) ) return ConverterResult(output_text=filled_template, output_type="text") diff --git a/pyrit/prompt_converter/transparency_attack_converter.py b/pyrit/prompt_converter/transparency_attack_converter.py index 2580b7fe64..9897f3a108 100644 --- a/pyrit/prompt_converter/transparency_attack_converter.py +++ b/pyrit/prompt_converter/transparency_attack_converter.py @@ -219,7 +219,7 @@ def _load_and_preprocess_image(self, path: str) -> numpy.ndarray: # type: ignor img_resized = img_gray.resize(self.size, Image.Resampling.LANCZOS) return numpy.array(img_resized, dtype=numpy.float32) / 255.0 # normalize to [0, 1] except Exception as e: - raise ValueError(f"Failed to load and preprocess image from {path}: {e}") + raise ValueError(f"Failed to load and preprocess image from {path}: {e}") from e def _compute_mse_loss(self, blended_image: numpy.ndarray, target_tensor: numpy.ndarray) -> float: # type: ignore[type-arg, unused-ignore] """ @@ -284,7 +284,7 @@ async def _save_blended_image(self, attack_image: numpy.ndarray, alpha: numpy.nd await img_serializer.save_b64_image(data=image_str.decode()) return img_serializer.value except Exception as e: - raise ValueError(f"Failed to save blended image: {e}") + raise ValueError(f"Failed to save blended image: {e}") from e async def convert_async(self, *, prompt: str, input_type: PromptDataType = "image_path") -> ConverterResult: """ diff --git a/pyrit/prompt_converter/variation_converter.py b/pyrit/prompt_converter/variation_converter.py index e2f7fc2def..328e463072 100644 
--- a/pyrit/prompt_converter/variation_converter.py +++ b/pyrit/prompt_converter/variation_converter.py @@ -153,9 +153,9 @@ async def send_variation_prompt_async(self, request: Message) -> str: response = json.loads(response_msg) except json.JSONDecodeError: - raise InvalidJsonException(message=f"Invalid JSON response: {response_msg}") + raise InvalidJsonException(message=f"Invalid JSON response: {response_msg}") from None try: return str(response[0]) except KeyError: - raise InvalidJsonException(message=f"Invalid JSON response: {response_msg}") + raise InvalidJsonException(message=f"Invalid JSON response: {response_msg}") from None diff --git a/pyrit/prompt_converter/zalgo_converter.py b/pyrit/prompt_converter/zalgo_converter.py index 3c43cbd8eb..c24e7a5394 100644 --- a/pyrit/prompt_converter/zalgo_converter.py +++ b/pyrit/prompt_converter/zalgo_converter.py @@ -58,7 +58,7 @@ def _normalize_intensity(self, intensity: int) -> int: try: intensity = int(intensity) except (TypeError, ValueError): - raise ValueError(f"Invalid intensity value: {intensity!r} (must be an integer)") + raise ValueError(f"Invalid intensity value: {intensity!r} (must be an integer)") from None normalized_intensity = max(0, min(intensity, MAX_INTENSITY)) if intensity != normalized_intensity: diff --git a/pyrit/prompt_normalizer/normalizer_request.py b/pyrit/prompt_normalizer/normalizer_request.py index 1bc15de1b1..30869a09b2 100644 --- a/pyrit/prompt_normalizer/normalizer_request.py +++ b/pyrit/prompt_normalizer/normalizer_request.py @@ -25,8 +25,8 @@ def __init__( self, *, message: Message, - request_converter_configurations: list[PromptConverterConfiguration] = [], - response_converter_configurations: list[PromptConverterConfiguration] = [], + request_converter_configurations: Optional[list[PromptConverterConfiguration]] = None, + response_converter_configurations: Optional[list[PromptConverterConfiguration]] = None, conversation_id: Optional[str] = None, ): """ @@ -40,6 +40,10 @@ the response. 
Defaults to an empty list. conversation_id (Optional[str]): The ID of the conversation. Defaults to None. """ + if response_converter_configurations is None: + response_converter_configurations = [] + if request_converter_configurations is None: + request_converter_configurations = [] self.message = message self.request_converter_configurations = request_converter_configurations self.response_converter_configurations = response_converter_configurations diff --git a/pyrit/prompt_normalizer/prompt_normalizer.py b/pyrit/prompt_normalizer/prompt_normalizer.py index 01c8dab711..b730a58669 100644 --- a/pyrit/prompt_normalizer/prompt_normalizer.py +++ b/pyrit/prompt_normalizer/prompt_normalizer.py @@ -51,8 +51,8 @@ async def send_prompt_async( message: Message, target: PromptTarget, conversation_id: Optional[str] = None, - request_converter_configurations: list[PromptConverterConfiguration] = [], - response_converter_configurations: list[PromptConverterConfiguration] = [], + request_converter_configurations: list[PromptConverterConfiguration] | None = None, + response_converter_configurations: list[PromptConverterConfiguration] | None = None, labels: Optional[dict[str, str]] = None, attack_identifier: Optional[ComponentIdentifier] = None, ) -> Message: @@ -79,6 +79,8 @@ async def send_prompt_async( Message: The response received from the target. 
""" # Validates that the MessagePieces in the Message are part of the same sequence + request_converter_configurations = request_converter_configurations or [] + response_converter_configurations = response_converter_configurations or [] if len({piece.sequence for piece in message.message_pieces}) > 1: raise ValueError("All MessagePieces in the Message must have the same sequence.") diff --git a/pyrit/prompt_target/azure_ml_chat_target.py b/pyrit/prompt_target/azure_ml_chat_target.py index 24c49299dd..4daac0ee16 100644 --- a/pyrit/prompt_target/azure_ml_chat_target.py +++ b/pyrit/prompt_target/azure_ml_chat_target.py @@ -180,7 +180,7 @@ async def send_prompt_async(self, *, message: Message) -> list[Message]: # Handle Bad Request response_entry = handle_bad_request_exception(response_text=hse.response.text, request=request) elif hse.response.status_code == 429: - raise RateLimitException() + raise RateLimitException() from hse else: raise hse @@ -218,11 +218,11 @@ async def _complete_chat_async( return str(response.json()["output"]) except Exception as e: if response.json() == {}: - raise EmptyResponseException(message="The chat returned an empty response.") + raise EmptyResponseException(message="The chat returned an empty response.") from e raise e( f"Exception obtaining response from the target. Returned response: {response.json()}. 
" + f"Exception: {str(e)}" # type: ignore - ) + ) from e async def _construct_http_body_async( self, diff --git a/pyrit/prompt_target/openai/openai_response_target.py b/pyrit/prompt_target/openai/openai_response_target.py index d61923bc32..7a8dec1da4 100644 --- a/pyrit/prompt_target/openai/openai_response_target.py +++ b/pyrit/prompt_target/openai/openai_response_target.py @@ -533,7 +533,7 @@ async def send_prompt_async(self, *, message: Message) -> list[Message]: # Use unified error handling - automatically detects Response and validates result = await self._handle_openai_request( - api_call=lambda: self._async_client.responses.create(**body), + api_call=lambda body=body: self._async_client.responses.create(**body), request=message, ) @@ -755,7 +755,7 @@ async def _execute_call_section(self, tool_call_section: dict[str, Any]) -> dict except Exception: # If arguments are not valid JSON, surface a structured error (or raise) if self._fail_on_missing_function: - raise ValueError(f"Malformed arguments for function '{name}': {args_json}") + raise ValueError(f"Malformed arguments for function '{name}': {args_json}") from None logger.warning("Malformed arguments for function '%s': %s", name, args_json) return { "error": "malformed_arguments", diff --git a/pyrit/prompt_target/openai/openai_target.py b/pyrit/prompt_target/openai/openai_target.py index 6e08a3d76a..bf9c46bf6e 100644 --- a/pyrit/prompt_target/openai/openai_target.py +++ b/pyrit/prompt_target/openai/openai_target.py @@ -496,14 +496,14 @@ def model_dump_json(self) -> str: request_id = _extract_request_id_from_exception(e) retry_after = _extract_retry_after_from_exception(e) logger.warning(f"RateLimitError request_id={request_id} retry_after={retry_after} error={e}") - raise RateLimitException() + raise RateLimitException() from None except APIStatusError as e: # Other API status errors - check for 429 here as well request_id = _extract_request_id_from_exception(e) if getattr(e, "status_code", None) == 429: 
retry_after = _extract_retry_after_from_exception(e) logger.warning(f"429 via APIStatusError request_id={request_id} retry_after={retry_after}") - raise RateLimitException() + raise RateLimitException() from None logger.exception( f"APIStatusError request_id={request_id} status={getattr(e, 'status_code', None)} error={e}" ) diff --git a/pyrit/prompt_target/websocket_copilot_target.py b/pyrit/prompt_target/websocket_copilot_target.py index f4d5105b70..9b72b9f052 100644 --- a/pyrit/prompt_target/websocket_copilot_target.py +++ b/pyrit/prompt_target/websocket_copilot_target.py @@ -364,7 +364,7 @@ async def _build_prompt_message( text_parts: list[str] = [] message_annotations: list[dict[str, Any]] = [] - for idx, piece in enumerate(message_pieces): + for _idx, piece in enumerate(message_pieces): if piece.converted_value_data_type == "text": text_parts.append(piece.converted_value) @@ -527,7 +527,7 @@ async def _connect_and_send( except asyncio.TimeoutError: raise TimeoutError( f"Timed out waiting for Copilot response after {self._response_timeout_seconds} seconds." 
- ) + ) from None if raw_message is None: raise RuntimeError( diff --git a/pyrit/registry/discovery.py b/pyrit/registry/discovery.py index 72fdd6f91d..3ba7174f2b 100644 --- a/pyrit/registry/discovery.py +++ b/pyrit/registry/discovery.py @@ -127,7 +127,7 @@ def discover_in_package( # For non-package modules, find and yield subclasses if not is_pkg: - for name, obj in inspect.getmembers(module, inspect.isclass): + for _name, obj in inspect.getmembers(module, inspect.isclass): if issubclass(obj, base_class) and obj is not base_class and not inspect.isabstract(obj): # Build the registry name including any prefix registry_name = name_builder(_prefix, module_name) @@ -186,6 +186,6 @@ def discover_subclasses_in_loaded_modules( if any(module_name.startswith(prefix) for prefix in exclude_module_prefixes): continue - for name, obj in inspect.getmembers(module, inspect.isclass): + for _name, obj in inspect.getmembers(module, inspect.isclass): if issubclass(obj, base_class) and obj is not base_class and not inspect.isabstract(obj): yield (module_name, obj) diff --git a/pyrit/scenario/scenarios/airt/jailbreak.py b/pyrit/scenario/scenarios/airt/jailbreak.py index 20dfc50e9f..22632064b5 100644 --- a/pyrit/scenario/scenarios/airt/jailbreak.py +++ b/pyrit/scenario/scenarios/airt/jailbreak.py @@ -124,7 +124,7 @@ def __init__( scenario_result_id: Optional[str] = None, num_templates: Optional[int] = None, num_attempts: int = 1, - jailbreak_names: list[str] = [], + jailbreak_names: list[str] = None, ) -> None: """ Initialize the jailbreak scenario. @@ -147,6 +147,8 @@ def __init__( templates. """ + if jailbreak_names is None: + jailbreak_names = [] if jailbreak_names and num_templates: raise ValueError( "Please provide only one of `num_templates` (random selection) or `jailbreak_names` (specific selection)." 
diff --git a/pyrit/score/audio_transcript_scorer.py b/pyrit/score/audio_transcript_scorer.py index 4004dffc0d..92de08def0 100644 --- a/pyrit/score/audio_transcript_scorer.py +++ b/pyrit/score/audio_transcript_scorer.py @@ -16,7 +16,7 @@ logger = logging.getLogger(__name__) -class AudioTranscriptHelper(ABC): +class AudioTranscriptHelper(ABC): # noqa: B024 """ Abstract base class for audio scorers that process audio by transcribing and scoring the text. diff --git a/pyrit/score/float_scale/float_scale_scorer.py b/pyrit/score/float_scale/float_scale_scorer.py index 2fc9a732ed..62337102fd 100644 --- a/pyrit/score/float_scale/float_scale_scorer.py +++ b/pyrit/score/float_scale/float_scale_scorer.py @@ -103,5 +103,5 @@ async def _score_value_with_llm( score_value = score.raw_score_value if score else "None" raise InvalidJsonException( message=(f"Invalid JSON response, score_value should be a float not this: {score_value}") - ) + ) from None return score diff --git a/pyrit/score/float_scale/insecure_code_scorer.py b/pyrit/score/float_scale/insecure_code_scorer.py index ba4f65a5cd..45c64dab00 100644 --- a/pyrit/score/float_scale/insecure_code_scorer.py +++ b/pyrit/score/float_scale/insecure_code_scorer.py @@ -103,7 +103,7 @@ async def _score_piece_async(self, message_piece: MessagePiece, *, objective: Op # Attempt to use score_value if available raw_score_value = float(unvalidated_score.raw_score_value) except KeyError: - raise InvalidJsonException(message="Expected 'score_value' key missing in the JSON response") + raise InvalidJsonException(message="Expected 'score_value' key missing in the JSON response") from None # Convert UnvalidatedScore to Score, applying scaling and metadata score = unvalidated_score.to_score( diff --git a/pyrit/score/scorer.py b/pyrit/score/scorer.py index bd5a1b2294..af8bca037e 100644 --- a/pyrit/score/scorer.py +++ b/pyrit/score/scorer.py @@ -639,10 +639,10 @@ async def _score_value_with_llm( ) except json.JSONDecodeError: - raise 
InvalidJsonException(message=f"Invalid JSON response: {response_json}") + raise InvalidJsonException(message=f"Invalid JSON response: {response_json}") from None except KeyError: - raise InvalidJsonException(message=f"Invalid JSON response, missing Key: {response_json}") + raise InvalidJsonException(message=f"Invalid JSON response, missing Key: {response_json}") from None return score diff --git a/pyrit/score/scorer_evaluation/human_labeled_dataset.py b/pyrit/score/scorer_evaluation/human_labeled_dataset.py index d0eab8d1a3..a8634be77b 100644 --- a/pyrit/score/scorer_evaluation/human_labeled_dataset.py +++ b/pyrit/score/scorer_evaluation/human_labeled_dataset.py @@ -296,7 +296,7 @@ def from_csv( entries: list[HumanLabeledEntry] = [] for response_to_score, human_scores, objective_or_harm, data_type in zip( - responses_to_score, all_human_scores, objectives_or_harms, data_types + responses_to_score, all_human_scores, objectives_or_harms, data_types, strict=False ): response_to_score = str(response_to_score).strip() objective_or_harm = str(objective_or_harm).strip() diff --git a/pyrit/score/true_false/gandalf_scorer.py b/pyrit/score/true_false/gandalf_scorer.py index 9946a8a2c0..2aab7c264e 100644 --- a/pyrit/score/true_false/gandalf_scorer.py +++ b/pyrit/score/true_false/gandalf_scorer.py @@ -133,7 +133,7 @@ async def _check_for_password_in_conversation(self, conversation_id: str) -> str response = await self._prompt_target.send_prompt_async(message=request) response_text = response[0].get_value() except (RuntimeError, BadRequestError): - raise PyritException(message="Error in Gandalf Scorer. Unable to check for password in text.") + raise PyritException(message="Error in Gandalf Scorer. 
Unable to check for password in text.") from None if response_text.strip() == "NO": return "" return response_text diff --git a/pyrit/score/video_scorer.py b/pyrit/score/video_scorer.py index 8ae26ec091..53344507f9 100644 --- a/pyrit/score/video_scorer.py +++ b/pyrit/score/video_scorer.py @@ -17,7 +17,7 @@ logger = logging.getLogger(__name__) -class _BaseVideoScorer(ABC): +class _BaseVideoScorer(ABC): # noqa: B024 """ Abstract base class for video scorers that process videos by extracting frames and scoring them. diff --git a/pyrit/setup/initializers/pyrit_initializer.py b/pyrit/setup/initializers/pyrit_initializer.py index b43bf161c6..ec11d0b8d9 100644 --- a/pyrit/setup/initializers/pyrit_initializer.py +++ b/pyrit/setup/initializers/pyrit_initializer.py @@ -30,7 +30,7 @@ class PyRITInitializer(ABC): validation logic is needed. """ - def __init__(self) -> None: + def __init__(self) -> None: # noqa: B027 """Initialize the PyRIT initializer with no parameters.""" @property @@ -153,7 +153,7 @@ def _track_initialization_changes(self) -> Iterator[dict[str, Any]]: new_main_dict = sys.modules["__main__"].__dict__ # Track default values that were added - just collect class.parameter pairs - for scope, value in new_defaults.items(): + for scope, _value in new_defaults.items(): if scope not in current_default_keys: class_param = f"{scope.class_type.__name__}.{scope.parameter_name}" if class_param not in tracking_info["default_values"]: diff --git a/tests/integration/ai_recruiter/test_ai_recruiter.py b/tests/integration/ai_recruiter/test_ai_recruiter.py index cac9ab2ccf..84039a4237 100644 --- a/tests/integration/ai_recruiter/test_ai_recruiter.py +++ b/tests/integration/ai_recruiter/test_ai_recruiter.py @@ -69,7 +69,7 @@ async def evaluate_candidate_selection(final_result: str, expected_candidate: st # Get the scored response asynchronously from the scorer. 
scored_response = (await true_false_classifier.score_text_async(text=prompt))[0] except PyritException as e: - raise PyritException(message=f"Error during candidate evaluation: {e}") + raise PyritException(message=f"Error during candidate evaluation: {e}") from e return scored_response.get_value() is True diff --git a/tests/integration/converter/test_retry_timing_integration.py b/tests/integration/converter/test_retry_timing_integration.py index 18d48e345a..884a4b0cc1 100644 --- a/tests/integration/converter/test_retry_timing_integration.py +++ b/tests/integration/converter/test_retry_timing_integration.py @@ -33,7 +33,7 @@ async def test_translation_converter_exponential_backoff_timing(sqlite_instance) mock_send_prompt = AsyncMock(side_effect=Exception("Test failure")) start_time = time.time() - with patch.object(prompt_target, "send_prompt_async", mock_send_prompt), pytest.raises(Exception): + with patch.object(prompt_target, "send_prompt_async", mock_send_prompt), pytest.raises(Exception): # noqa: B017 await translation_converter.convert_async(prompt="hello") elapsed_time = time.time() - start_time diff --git a/tests/unit/common/test_common_default.py b/tests/unit/common/test_common_default.py index 0d432b2e72..b7e81156f0 100644 --- a/tests/unit/common/test_common_default.py +++ b/tests/unit/common/test_common_default.py @@ -21,4 +21,4 @@ def test_get_required_value_uses_default(): def test_get_required_value_throws_if_not_set(): os.environ["TEST_ENV_VAR"] = "" with pytest.raises(ValueError): - default_values.get_required_value(env_var_name="TEST_ENV_VAR", passed_value="") == "default" + default_values.get_required_value(env_var_name="TEST_ENV_VAR", passed_value="") diff --git a/tests/unit/common/test_helper_functions.py b/tests/unit/common/test_helper_functions.py index c49a00a642..e52616c23e 100644 --- a/tests/unit/common/test_helper_functions.py +++ b/tests/unit/common/test_helper_functions.py @@ -343,7 +343,7 @@ def 
test_verify_and_resolve_path_rejects_nonexistent(self) -> None: def test_verify_and_resolve_path_confirms_existing(self) -> None: """Test that the function verifies paths that currently exist under the scorer configs.""" full_paths: list[str] = [] - for root, dirs, files in os.walk(SCORER_SEED_PROMPT_PATH): + for root, _dirs, files in os.walk(SCORER_SEED_PROMPT_PATH): full_paths.extend([os.path.join(root, f) for f in files if f.endswith(".yaml")]) resolved_paths = [Path(p).resolve() for p in full_paths] attempted_paths = [verify_and_resolve_path(p) for p in full_paths] diff --git a/tests/unit/converter/test_add_image_video_converter.py b/tests/unit/converter/test_add_image_video_converter.py index 8e20e729bd..ec297fcd3f 100644 --- a/tests/unit/converter/test_add_image_video_converter.py +++ b/tests/unit/converter/test_add_image_video_converter.py @@ -30,7 +30,7 @@ def video_converter_sample_video(patch_central_database): video_encoding = cv2.VideoWriter_fourcc(*"mp4v") output_video = cv2.VideoWriter(video_path, video_encoding, 1, (width, height)) # Create a few frames for video - for i in range(10): + for _i in range(10): frame = np.zeros((height, width, 3), dtype=np.uint8) output_video.write(frame) output_video.release() diff --git a/tests/unit/converter/test_colloquial_wordswap_converter.py b/tests/unit/converter/test_colloquial_wordswap_converter.py index f8203511fa..6ff5775db9 100644 --- a/tests/unit/converter/test_colloquial_wordswap_converter.py +++ b/tests/unit/converter/test_colloquial_wordswap_converter.py @@ -52,7 +52,7 @@ async def test_colloquial_non_deterministic(input_text): output_words = re.findall(r"\w+|\S+", result.output_text) # Check that each wordswap is a valid substitution - for input_word, output_word in zip(input_words, output_words): + for input_word, output_word in zip(input_words, output_words, strict=False): lower_input_word = input_word.lower() if lower_input_word in valid_substitutions: diff --git 
a/tests/unit/converter/test_image_compression_converter.py b/tests/unit/converter/test_image_compression_converter.py index a753b0c14c..413a0fc319 100644 --- a/tests/unit/converter/test_image_compression_converter.py +++ b/tests/unit/converter/test_image_compression_converter.py @@ -370,7 +370,7 @@ async def test_image_compression_converter_corrupted_image_bytes(): mock_serializer = AsyncMock() mock_serializer.read_data.return_value = corrupted_bytes mock_factory.return_value = mock_serializer - with pytest.raises(Exception): + with pytest.raises(Exception): # noqa: B017 await converter.convert_async(prompt="corrupted.png", input_type="image_path") diff --git a/tests/unit/converter/test_leetspeak_converter.py b/tests/unit/converter/test_leetspeak_converter.py index 28de9de77a..87b56139e1 100644 --- a/tests/unit/converter/test_leetspeak_converter.py +++ b/tests/unit/converter/test_leetspeak_converter.py @@ -66,7 +66,7 @@ def test_leetspeak_non_deterministic(input_text): # Check that each character in the output is a valid substitution assert all( char in valid_chars.get(original_char, [original_char]) - for original_char, char in zip(input_text, result.output_text) + for original_char, char in zip(input_text, result.output_text, strict=False) ) diff --git a/tests/unit/converter/test_superscript_converter.py b/tests/unit/converter/test_superscript_converter.py index fae5db11f7..6909ddbf81 100644 --- a/tests/unit/converter/test_superscript_converter.py +++ b/tests/unit/converter/test_superscript_converter.py @@ -7,7 +7,7 @@ async def _check_conversion(converter, prompts, expected_outputs): - for prompt, expected_output in zip(prompts, expected_outputs): + for prompt, expected_output in zip(prompts, expected_outputs, strict=False): result = await converter.convert_async(prompt=prompt, input_type="text") assert isinstance(result, ConverterResult) assert result.output_text == expected_output diff --git a/tests/unit/converter/test_translation_converter.py 
b/tests/unit/converter/test_translation_converter.py index d2da0a77e2..be0e236560 100644 --- a/tests/unit/converter/test_translation_converter.py +++ b/tests/unit/converter/test_translation_converter.py @@ -56,7 +56,7 @@ async def test_translation_converter_retries_on_exception(sqlite_instance): # Mock asyncio.sleep to avoid exponential backoff delays with patch.object(prompt_target, "send_prompt_async", mock_send_prompt): with patch("asyncio.sleep", new_callable=AsyncMock): - with pytest.raises(Exception): + with pytest.raises(Exception): # noqa: B017 await translation_converter.convert_async(prompt="hello") assert mock_send_prompt.call_count == max_retries diff --git a/tests/unit/datasets/test_jailbreak_text.py b/tests/unit/datasets/test_jailbreak_text.py index cd10ff6012..e450309fd6 100644 --- a/tests/unit/datasets/test_jailbreak_text.py +++ b/tests/unit/datasets/test_jailbreak_text.py @@ -223,7 +223,7 @@ def teardown_method(self) -> None: def test_scan_template_files_excludes_multi_parameter(self) -> None: """Test that _scan_template_files excludes files under multi_parameter directories.""" result = TextJailBreak._scan_template_files() - for filename, paths in result.items(): + for _filename, paths in result.items(): for path in paths: assert "multi_parameter" not in path.parts diff --git a/tests/unit/datasets/test_local_dataset_loader.py b/tests/unit/datasets/test_local_dataset_loader.py index 6532c2cf29..f62170d15c 100644 --- a/tests/unit/datasets/test_local_dataset_loader.py +++ b/tests/unit/datasets/test_local_dataset_loader.py @@ -53,5 +53,5 @@ async def test_fetch_dataset(self, tmp_path, valid_yaml_content): @pytest.mark.asyncio async def test_fetch_dataset_file_not_found(self): loader = _LocalDatasetLoader(file_path=Path("non_existent.yaml")) - with pytest.raises(Exception): + with pytest.raises(Exception): # noqa: B017 await loader.fetch_dataset() diff --git a/tests/unit/executor/attack/multi_turn/test_tree_of_attacks.py 
b/tests/unit/executor/attack/multi_turn/test_tree_of_attacks.py index ef49184977..5c27c57893 100644 --- a/tests/unit/executor/attack/multi_turn/test_tree_of_attacks.py +++ b/tests/unit/executor/attack/multi_turn/test_tree_of_attacks.py @@ -380,7 +380,7 @@ def create_threshold_score(*, original_float_value: float, threshold: float = 0. @staticmethod def add_nodes_to_tree(context: TAPAttackContext, nodes: list[_TreeOfAttacksNode], parent: str = "root"): """Add nodes to the context's tree visualization.""" - for i, node in enumerate(nodes): + for _i, node in enumerate(nodes): score_str = "" if node.objective_score: score_str = f": Score {node.objective_score.get_value()}" diff --git a/tests/unit/memory/memory_interface/test_interface_prompts.py b/tests/unit/memory/memory_interface/test_interface_prompts.py index d62e962265..99f973f3f1 100644 --- a/tests/unit/memory/memory_interface/test_interface_prompts.py +++ b/tests/unit/memory/memory_interface/test_interface_prompts.py @@ -606,7 +606,7 @@ def test_duplicate_conversation_with_multiple_pieces(sqlite_instance: MemoryInte # Sequences and roles should be preserved for orig, new in zip( - sorted(original_pieces, key=lambda p: p.sequence), sorted(new_pieces, key=lambda p: p.sequence) + sorted(original_pieces, key=lambda p: p.sequence), sorted(new_pieces, key=lambda p: p.sequence), strict=False ): assert orig.sequence == new.sequence assert orig.api_role == new.api_role @@ -1030,7 +1030,7 @@ def test_get_message_pieces_sorts( if new_value != current_value and any( o.conversation_id == current_value for o in response[response.index(obj) :] ): - assert False, "Conversation IDs are not grouped together" + raise AssertionError("Conversation IDs are not grouped together") def test_message_piece_scores_duplicate_piece(sqlite_instance: MemoryInterface): diff --git a/tests/unit/memory/test_memory_exporter.py b/tests/unit/memory/test_memory_exporter.py index 91cc79de5c..028cd5b833 100644 --- 
a/tests/unit/memory/test_memory_exporter.py +++ b/tests/unit/memory/test_memory_exporter.py @@ -60,7 +60,7 @@ def test_export_to_json_creates_file(tmp_path, export_type): # Convert each MessagePiece instance to a dictionary expected_content = [message_piece.to_dict() for message_piece in sample_conversation_entries] - for expected, actual in zip(expected_content, content): + for expected, actual in zip(expected_content, content, strict=False): assert expected["role"] == actual["role"] assert expected["converted_value"] == actual["converted_value"] assert expected["conversation_id"] == actual["conversation_id"] diff --git a/tests/unit/models/test_message.py b/tests/unit/models/test_message.py index c94a733ab9..49d43db346 100644 --- a/tests/unit/models/test_message.py +++ b/tests/unit/models/test_message.py @@ -125,7 +125,7 @@ def test_duplicate_message_creates_new_ids(self, message: Message) -> None: duplicated_ids = [piece.id for piece in duplicated.message_pieces] # Verify new IDs are different from original - for orig_id, dup_id in zip(original_ids, duplicated_ids): + for orig_id, dup_id in zip(original_ids, duplicated_ids, strict=False): assert orig_id != dup_id # Verify duplicated IDs are unique @@ -135,7 +135,7 @@ def test_duplicate_message_preserves_content(self, message: Message) -> None: """Test that duplicate_message preserves all content fields.""" duplicated = message.duplicate_message() - for orig_piece, dup_piece in zip(message.message_pieces, duplicated.message_pieces): + for orig_piece, dup_piece in zip(message.message_pieces, duplicated.message_pieces, strict=False): assert orig_piece.original_value == dup_piece.original_value assert orig_piece.converted_value == dup_piece.converted_value assert orig_piece.api_role == dup_piece.api_role @@ -147,7 +147,7 @@ def test_duplicate_message_preserves_original_prompt_id(self, message: Message) """Test that duplicate_message preserves original_prompt_id for tracing.""" duplicated = message.duplicate_message() 
- for orig_piece, dup_piece in zip(message.message_pieces, duplicated.message_pieces): + for orig_piece, dup_piece in zip(message.message_pieces, duplicated.message_pieces, strict=False): assert orig_piece.original_prompt_id == dup_piece.original_prompt_id def test_duplicate_message_creates_new_timestamp(self, message: Message) -> None: diff --git a/tests/unit/score/test_conversation_history_scorer.py b/tests/unit/score/test_conversation_history_scorer.py index 15c8a31ac8..3e5d1ac23e 100644 --- a/tests/unit/score/test_conversation_history_scorer.py +++ b/tests/unit/score/test_conversation_history_scorer.py @@ -369,7 +369,7 @@ def test_factory_preserves_wrapped_scorer(): assert isinstance(conv_scorer, ConversationScorer) # Access via attribute since _get_wrapped_scorer is available at runtime assert hasattr(conv_scorer, "_wrapped_scorer") - wrapped = getattr(conv_scorer, "_wrapped_scorer") + wrapped = conv_scorer._wrapped_scorer assert wrapped is original_scorer assert wrapped.custom_attr == "test_value" # type: ignore diff --git a/tests/unit/score/test_video_scorer.py b/tests/unit/score/test_video_scorer.py index 90d9ebeb44..9bd00a0b6c 100644 --- a/tests/unit/score/test_video_scorer.py +++ b/tests/unit/score/test_video_scorer.py @@ -41,7 +41,7 @@ def video_converter_sample_video(patch_central_database): video_encoding = cv2.VideoWriter_fourcc(*"mp4v") output_video = cv2.VideoWriter(video_path, video_encoding, 20, (width, height)) # Create a few frames for video - for i in range(10): + for _i in range(10): frame = np.zeros((height, width, 3), dtype=np.uint8) processed_frame = cv2.flip(frame, 0) output_video.write(processed_frame) diff --git a/tests/unit/target/test_azure_ml_chat_target.py b/tests/unit/target/test_azure_ml_chat_target.py index 6c49d4a014..a8cc386d16 100644 --- a/tests/unit/target/test_azure_ml_chat_target.py +++ b/tests/unit/target/test_azure_ml_chat_target.py @@ -130,7 +130,7 @@ async def 
test_send_prompt_async_bad_request_error_adds_to_memory(aml_online_cha mock_complete_chat_async = AsyncMock( side_effect=HTTPStatusError(message="Bad Request", request=MagicMock(), response=response) ) - setattr(aml_online_chat, "_complete_chat_async", mock_complete_chat_async) + aml_online_chat._complete_chat_async = mock_complete_chat_async message = Message(message_pieces=[MessagePiece(role="user", conversation_id="123", original_value="Hello")]) with pytest.raises(HTTPStatusError) as bre: @@ -154,7 +154,7 @@ async def test_send_prompt_async_rate_limit_exception_adds_to_memory(aml_online_ mock_complete_chat_async = AsyncMock( side_effect=HTTPStatusError(message="Rate Limit Reached", request=MagicMock(), response=response) ) - setattr(aml_online_chat, "_complete_chat_async", mock_complete_chat_async) + aml_online_chat._complete_chat_async = mock_complete_chat_async message = Message(message_pieces=[MessagePiece(role="user", conversation_id="123", original_value="Hello")]) with pytest.raises(RateLimitException) as rle: @@ -172,7 +172,7 @@ async def test_send_prompt_async_rate_limit_exception_retries(aml_online_chat: A mock_complete_chat_async = AsyncMock( side_effect=RateLimitError("Rate Limit Reached", response=response, body="Rate limit reached") ) - setattr(aml_online_chat, "_complete_chat_async", mock_complete_chat_async) + aml_online_chat._complete_chat_async = mock_complete_chat_async message = Message(message_pieces=[MessagePiece(role="user", conversation_id="12345", original_value="Hello")]) with pytest.raises(RateLimitError): @@ -188,7 +188,7 @@ async def test_send_prompt_async_empty_response_retries(aml_online_chat: AzureML mock_complete_chat_async = AsyncMock() mock_complete_chat_async.return_value = None - setattr(aml_online_chat, "_complete_chat_async", mock_complete_chat_async) + aml_online_chat._complete_chat_async = mock_complete_chat_async message = Message(message_pieces=[MessagePiece(role="user", conversation_id="12345", 
original_value="Hello")]) with pytest.raises(EmptyResponseException): diff --git a/tests/unit/target/test_image_target.py b/tests/unit/target/test_image_target.py index d25655f6c9..ffba3a7564 100644 --- a/tests/unit/target/test_image_target.py +++ b/tests/unit/target/test_image_target.py @@ -245,7 +245,7 @@ async def test_send_prompt_async_bad_request_error( mock_generate.side_effect = bad_request_error # Non-content-filter BadRequestError should be re-raised (same as chat target behavior) - with pytest.raises(Exception): + with pytest.raises(Exception): # noqa: B017 await image_target.send_prompt_async(message=Message([request])) diff --git a/tests/unit/target/test_openai_chat_target.py b/tests/unit/target/test_openai_chat_target.py index 322e4936da..b90ec97f2b 100644 --- a/tests/unit/target/test_openai_chat_target.py +++ b/tests/unit/target/test_openai_chat_target.py @@ -379,7 +379,7 @@ async def test_send_prompt_async_bad_request_error_adds_to_memory(target: OpenAI target._async_client.chat.completions.create = AsyncMock(side_effect=side_effect) # type: ignore[method-assign] # Non-content-filter BadRequestError should be re-raised - with pytest.raises(Exception): # Will raise since handle_bad_request_exception re-raises non-content-filter errors + with pytest.raises(Exception): # noqa: B017 # Will raise since handle_bad_request_exception re-raises non-content-filter errors await target.send_prompt_async(message=message) @@ -507,7 +507,7 @@ async def test_send_prompt_async_bad_request_error(target: OpenAIChatTarget): target._async_client.chat.completions.create = AsyncMock(side_effect=side_effect) # type: ignore[method-assign] # Non-content-filter BadRequestError should be re-raised - with pytest.raises(Exception): # Will raise since handle_bad_request_exception re-raises non-content-filter errors + with pytest.raises(Exception): # noqa: B017 # Will raise since handle_bad_request_exception re-raises non-content-filter errors await 
target.send_prompt_async(message=message) diff --git a/tests/unit/target/test_playwright_target.py b/tests/unit/target/test_playwright_target.py index 3f9502c1c0..c62a8bb818 100644 --- a/tests/unit/target/test_playwright_target.py +++ b/tests/unit/target/test_playwright_target.py @@ -274,7 +274,7 @@ def test_protocol_interaction_function_signature(self): from pyrit.prompt_target.playwright_target import InteractionFunction # Check that the protocol exists and has the right signature - assert hasattr(InteractionFunction, "__call__") + assert callable(InteractionFunction) @pytest.mark.asyncio async def test_interaction_function_receives_complete_request(self, mock_page, multiple_text_pieces):