From 50542755efbed8ce5cf5d82cb8319e71b7b102fe Mon Sep 17 00:00:00 2001 From: Adrian Osorio Blanchard Date: Wed, 29 Apr 2026 22:31:56 -0400 Subject: [PATCH 01/26] feat(workflows): support file-backed inputs --- docs/reference/workflows.md | 13 ++- src/specify_cli/__init__.py | 105 ++++++++++++++++++-- tests/test_workflows.py | 188 ++++++++++++++++++++++++++++++++++++ 3 files changed, 291 insertions(+), 15 deletions(-) diff --git a/docs/reference/workflows.md b/docs/reference/workflows.md index e7e921e1e9..fadff19656 100644 --- a/docs/reference/workflows.md +++ b/docs/reference/workflows.md @@ -8,16 +8,19 @@ Workflows automate multi-step Spec-Driven Development processes — chaining com specify workflow run ``` -| Option | Description | -| ------------------- | -------------------------------------------------------- | -| `-i` / `--input` | Pass input values as `key=value` (repeatable) | +| Option | Description | +| ------------------- | ------------------------------------------------------------------------------------------------ | +| `-i` / `--input` | Pass workflow inputs/parameters as `key=value` (repeatable); use `key=@path` to read text files | +| `--input-file` | Load workflow inputs/parameters from a JSON object file; repeatable `--input` values override file values | -Runs a workflow from a catalog ID, URL, or local file path. Inputs declared by the workflow can be provided via `--input` or will be prompted interactively. +Runs a workflow from a catalog ID, URL, or local file path. Inputs/parameters declared by the workflow can be provided via `--input` or will be prompted interactively. 
Example: ```bash -specify workflow run speckit -i spec="Build a kanban board with drag-and-drop task management" -i scope=full +specify workflow run ./workflow.yml -i prompt="Build a workflow" -i scope=full +specify workflow run ./workflow.yml --input prompt=@docs/prompt.md +specify workflow run ./workflow.yml --input-file payload.json -i scope=full ``` > **Note:** All workflow commands require a project already initialized with `specify init`. diff --git a/src/specify_cli/__init__.py b/src/specify_cli/__init__.py index ccd670d20e..7b06892bb4 100644 --- a/src/specify_cli/__init__.py +++ b/src/specify_cli/__init__.py @@ -5257,11 +5257,100 @@ def extension_set_priority( workflow_app.add_typer(workflow_catalog_app, name="catalog") +def _resolve_workflow_cli_path(raw_path: str) -> Path: + """Resolve workflow CLI file paths from the current working directory.""" + path = Path(raw_path).expanduser() + if not path.is_absolute(): + path = Path.cwd() / path + return path + + +def _read_workflow_cli_file(raw_path: str, description: str) -> tuple[Path, str]: + """Read a text file referenced by a workflow CLI input option.""" + cleaned_path = raw_path.strip() + if not cleaned_path: + raise ValueError(f"Missing file path for {description}.") + + path = _resolve_workflow_cli_path(cleaned_path) + if not path.exists(): + raise ValueError(f"File for {description} not found: {path}") + if not path.is_file(): + raise ValueError(f"Path for {description} is not a file: {path}") + + try: + return path, path.read_text(encoding="utf-8") + except UnicodeDecodeError as exc: + raise ValueError( + f"Unable to read file for {description} as UTF-8 text: {path}" + ) from exc + except OSError as exc: + raise ValueError( + f"Unable to read file for {description}: {path} ({exc})" + ) from exc + + +def _load_workflow_input_file(input_file: str) -> dict[str, Any]: + """Load workflow inputs from a JSON object file.""" + path, raw_json = _read_workflow_cli_file(input_file, "--input-file") + try: + data 
= json.loads(raw_json) + except json.JSONDecodeError as exc: + raise ValueError( + f"Invalid JSON in --input-file {path}: " + f"{exc.msg} at line {exc.lineno}, column {exc.colno}" + ) from exc + + if not isinstance(data, dict): + raise ValueError( + f"--input-file must contain a JSON object, got {type(data).__name__}." + ) + return data + + +def _parse_workflow_inputs( + input_values: list[str] | None, + input_file: str | None, +) -> dict[str, Any]: + """Normalize workflow CLI input options into the engine input dict.""" + inputs: dict[str, Any] = {} + + if input_file is not None: + inputs.update(_load_workflow_input_file(input_file)) + + if input_values: + for kv in input_values: + if "=" not in kv: + raise ValueError( + f"Invalid input format: {kv!r} (expected key=value)" + ) + key, _, raw_value = kv.partition("=") + key = key.strip() + if not key: + raise ValueError( + f"Invalid input format: {kv!r} (key cannot be empty)" + ) + + value = raw_value.strip() + if value.startswith("@"): + file_ref = value[1:].strip() + if file_ref and _resolve_workflow_cli_path(file_ref).exists(): + _, value = _read_workflow_cli_file(file_ref, f"input {key!r}") + inputs[key] = value + + return inputs + + @workflow_app.command("run") def workflow_run( source: str = typer.Argument(..., help="Workflow ID or YAML file path"), input_values: list[str] | None = typer.Option( - None, "--input", "-i", help="Input values as key=value pairs" + None, + "--input", + "-i", + help="Input values as key=value pairs; use key=@path to read a text file", + ), + input_file: str | None = typer.Option( + None, "--input-file", help="Load input values from a JSON object file" ), ): """Run a workflow from an installed ID or local YAML path.""" @@ -5288,15 +5377,11 @@ def workflow_run( console.print(f" • {err}") raise typer.Exit(1) - # Parse inputs - inputs: dict[str, Any] = {} - if input_values: - for kv in input_values: - if "=" not in kv: - console.print(f"[red]Error:[/red] Invalid input format: {kv!r} 
(expected key=value)") - raise typer.Exit(1) - key, _, value = kv.partition("=") - inputs[key.strip()] = value.strip() + try: + inputs = _parse_workflow_inputs(input_values, input_file) + except ValueError as exc: + console.print(f"[red]Error:[/red] {exc}") + raise typer.Exit(1) console.print(f"\n[bold cyan]Running workflow:[/bold cyan] {definition.name} ({definition.id})") console.print(f"[dim]Version: {definition.version}[/dim]\n") diff --git a/tests/test_workflows.py b/tests/test_workflows.py index 4c042fc7d5..d4c4d4b302 100644 --- a/tests/test_workflows.py +++ b/tests/test_workflows.py @@ -84,6 +84,194 @@ def sample_workflow_file(project_dir, sample_workflow_yaml): return wf_path +# ===== Workflow CLI Input Tests ===== + +class TestWorkflowCliInputs: + """Test workflow run input normalization at the CLI boundary.""" + + def test_inline_input_still_works(self, project_dir, monkeypatch): + from specify_cli import _parse_workflow_inputs + + monkeypatch.chdir(project_dir) + + inputs = _parse_workflow_inputs( + ["spec=Build a kanban board", "scope=full"], + None, + ) + + assert inputs == { + "spec": "Build a kanban board", + "scope": "full", + } + + def test_at_file_input_reads_file_contents_for_generic_key( + self, + project_dir, + monkeypatch, + ): + from specify_cli import _parse_workflow_inputs + + desc_file = project_dir / "desc.md" + desc_text = "# Description\n\nBuild a workflow.\n" + desc_file.write_text(desc_text, encoding="utf-8") + monkeypatch.chdir(project_dir) + + inputs = _parse_workflow_inputs(["description=@desc.md"], None) + + assert inputs == {"description": desc_text} + + @pytest.mark.parametrize("literal", ["@alice", "@"]) + def test_missing_at_file_stays_literal(self, literal, project_dir, monkeypatch): + from specify_cli import _parse_workflow_inputs + + monkeypatch.chdir(project_dir) + + inputs = _parse_workflow_inputs([f"assignee={literal}"], None) + + assert inputs == {"assignee": literal} + + def test_missing_input_file_fails_cleanly(self, 
project_dir, monkeypatch): + from specify_cli import _parse_workflow_inputs + + monkeypatch.chdir(project_dir) + + with pytest.raises(ValueError, match="not found"): + _parse_workflow_inputs(None, "missing.json") + + def test_input_file_loads_json_object(self, project_dir, monkeypatch): + from specify_cli import _parse_workflow_inputs + + payload_file = project_dir / "payload.json" + payload_file.write_text( + json.dumps({"prompt": "Build a workflow", "scope": "full"}), + encoding="utf-8", + ) + monkeypatch.chdir(project_dir) + + inputs = _parse_workflow_inputs(None, "payload.json") + + assert inputs == { + "prompt": "Build a workflow", + "scope": "full", + } + + def test_direct_input_overrides_input_file(self, project_dir, monkeypatch): + from specify_cli import _parse_workflow_inputs + + payload_file = project_dir / "payload.json" + payload_file.write_text( + json.dumps({"prompt": "Build a workflow", "scope": "full"}), + encoding="utf-8", + ) + monkeypatch.chdir(project_dir) + + inputs = _parse_workflow_inputs(["scope=minimal"], "payload.json") + + assert inputs == { + "prompt": "Build a workflow", + "scope": "minimal", + } + + def test_invalid_json_input_file_fails_cleanly(self, project_dir, monkeypatch): + from specify_cli import _parse_workflow_inputs + + payload_file = project_dir / "payload.json" + payload_file.write_text("{invalid json", encoding="utf-8") + monkeypatch.chdir(project_dir) + + with pytest.raises(ValueError, match="Invalid JSON"): + _parse_workflow_inputs(None, "payload.json") + + @pytest.mark.parametrize("payload", ["[]", '"not an object"']) + def test_non_object_json_input_file_fails_cleanly( + self, + payload, + project_dir, + monkeypatch, + ): + from specify_cli import _parse_workflow_inputs + + payload_file = project_dir / "payload.json" + payload_file.write_text(payload, encoding="utf-8") + monkeypatch.chdir(project_dir) + + with pytest.raises(ValueError, match="JSON object"): + _parse_workflow_inputs(None, "payload.json") + + def 
test_malformed_inline_input_fails_cleanly(self): + from specify_cli import _parse_workflow_inputs + + with pytest.raises(ValueError, match="expected key=value"): + _parse_workflow_inputs(["spec"], None) + + def test_workflow_run_passes_normalized_inputs_to_engine( + self, + project_dir, + monkeypatch, + ): + from typer.testing import CliRunner + from specify_cli import app + from specify_cli.workflows import engine as engine_module + + payload_file = project_dir / "payload.json" + payload_file.write_text( + json.dumps({"spec": "Build a kanban board", "scope": "minimal"}), + encoding="utf-8", + ) + captured: dict[str, object] = {} + + class FakeDefinition: + id = "speckit" + name = "Spec Kit" + version = "1.0.0" + + class FakeStatus: + value = "completed" + + class FakeState: + status = FakeStatus() + run_id = "run-1" + + class FakeWorkflowEngine: + def __init__(self, project_root): + self.project_root = project_root + self.on_step_start = None + + def load_workflow(self, source): + captured["source"] = source + return FakeDefinition() + + def validate(self, definition): + return [] + + def execute(self, definition, inputs): + captured["inputs"] = inputs + return FakeState() + + monkeypatch.setattr(engine_module, "WorkflowEngine", FakeWorkflowEngine) + monkeypatch.chdir(project_dir) + + result = CliRunner().invoke( + app, + [ + "workflow", + "run", + "speckit", + "--input-file", + "payload.json", + "--input", + "scope=full", + ], + ) + + assert result.exit_code == 0, result.output + assert captured["source"] == "speckit" + assert captured["inputs"] == { + "spec": "Build a kanban board", + "scope": "full", + } + + # ===== Step Registry Tests ===== class TestStepRegistry: From 48c91b0e443a8d005c965c95d3096c8bb4441fbc Mon Sep 17 00:00:00 2001 From: Adrian Osorio Blanchard Date: Thu, 30 Apr 2026 00:42:26 -0400 Subject: [PATCH 02/26] Fix workflow @input directory handling --- src/specify_cli/__init__.py | 8 ++++++-- tests/test_workflows.py | 21 +++++++++++++++++++++ 2 
files changed, 27 insertions(+), 2 deletions(-) diff --git a/src/specify_cli/__init__.py b/src/specify_cli/__init__.py index 7b06892bb4..6ff2248d77 100644 --- a/src/specify_cli/__init__.py +++ b/src/specify_cli/__init__.py @@ -5333,8 +5333,12 @@ def _parse_workflow_inputs( value = raw_value.strip() if value.startswith("@"): file_ref = value[1:].strip() - if file_ref and _resolve_workflow_cli_path(file_ref).exists(): - _, value = _read_workflow_cli_file(file_ref, f"input {key!r}") + if file_ref: + candidate_path = _resolve_workflow_cli_path(file_ref) + if candidate_path.exists() and candidate_path.is_file(): + _, value = _read_workflow_cli_file( + file_ref, f"input {key!r}" + ) inputs[key] = value return inputs diff --git a/tests/test_workflows.py b/tests/test_workflows.py index d4c4d4b302..58db0ba508 100644 --- a/tests/test_workflows.py +++ b/tests/test_workflows.py @@ -130,6 +130,18 @@ def test_missing_at_file_stays_literal(self, literal, project_dir, monkeypatch): assert inputs == {"assignee": literal} + def test_existing_at_directory_stays_literal(self, project_dir, monkeypatch): + from specify_cli import _parse_workflow_inputs + + (project_dir / "some_existing_directory").mkdir() + monkeypatch.chdir(project_dir) + + assert _parse_workflow_inputs(["x=@."], None) == {"x": "@."} + assert _parse_workflow_inputs( + ["x=@some_existing_directory"], + None, + ) == {"x": "@some_existing_directory"} + def test_missing_input_file_fails_cleanly(self, project_dir, monkeypatch): from specify_cli import _parse_workflow_inputs @@ -138,6 +150,15 @@ def test_missing_input_file_fails_cleanly(self, project_dir, monkeypatch): with pytest.raises(ValueError, match="not found"): _parse_workflow_inputs(None, "missing.json") + def test_input_file_directory_fails_cleanly(self, project_dir, monkeypatch): + from specify_cli import _parse_workflow_inputs + + (project_dir / "payload.json").mkdir() + monkeypatch.chdir(project_dir) + + with pytest.raises(ValueError, match="not a file"): + 
_parse_workflow_inputs(None, "payload.json") + def test_input_file_loads_json_object(self, project_dir, monkeypatch): from specify_cli import _parse_workflow_inputs From 7ba8ea03b5021d64a1ff772fe35a59ba32e08ae8 Mon Sep 17 00:00:00 2001 From: Adrian Osorio Blanchard Date: Thu, 30 Apr 2026 00:51:56 -0400 Subject: [PATCH 03/26] Clarify workflow run source docs --- docs/reference/workflows.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/workflows.md b/docs/reference/workflows.md index fadff19656..086b559da4 100644 --- a/docs/reference/workflows.md +++ b/docs/reference/workflows.md @@ -13,7 +13,7 @@ specify workflow run | `-i` / `--input` | Pass workflow inputs/parameters as `key=value` (repeatable); use `key=@path` to read text files | | `--input-file` | Load workflow inputs/parameters from a JSON object file; repeatable `--input` values override file values | -Runs a workflow from a catalog ID, URL, or local file path. Inputs/parameters declared by the workflow can be provided via `--input` or will be prompted interactively. +Runs a workflow from an installed workflow ID or a local `.yml`/`.yaml` file path. Inputs/parameters declared by the workflow can be provided via `--input` or will be prompted interactively. 
Example: From 37d6d44c71e7e1d03b6d0a4c43a6245061cc520b Mon Sep 17 00:00:00 2001 From: Adrian Osorio Blanchard Date: Thu, 30 Apr 2026 01:01:49 -0400 Subject: [PATCH 04/26] Clarify workflow input file reference docs --- docs/reference/workflows.md | 4 ++-- src/specify_cli/__init__.py | 5 ++++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/docs/reference/workflows.md b/docs/reference/workflows.md index 086b559da4..e350aecdb0 100644 --- a/docs/reference/workflows.md +++ b/docs/reference/workflows.md @@ -10,10 +10,10 @@ specify workflow run | Option | Description | | ------------------- | ------------------------------------------------------------------------------------------------ | -| `-i` / `--input` | Pass workflow inputs/parameters as `key=value` (repeatable); use `key=@path` to read text files | +| `-i` / `--input` | Pass workflow inputs/parameters as `key=value` (repeatable); `key=@path` reads an existing text file, otherwise `@` values stay literal | | `--input-file` | Load workflow inputs/parameters from a JSON object file; repeatable `--input` values override file values | -Runs a workflow from an installed workflow ID or a local `.yml`/`.yaml` file path. Inputs/parameters declared by the workflow can be provided via `--input` or will be prompted interactively. +Runs a workflow from an installed workflow ID or a local `.yml`/`.yaml` file path. Inputs/parameters declared by the workflow can be provided via `--input` or `--input-file`, or will be prompted interactively. 
Example: diff --git a/src/specify_cli/__init__.py b/src/specify_cli/__init__.py index 6ff2248d77..424990d9fd 100644 --- a/src/specify_cli/__init__.py +++ b/src/specify_cli/__init__.py @@ -5351,7 +5351,10 @@ def workflow_run( None, "--input", "-i", - help="Input values as key=value pairs; use key=@path to read a text file", + help=( + "Input values as key=value pairs; key=@path reads an existing text " + "file, otherwise @ values stay literal" + ), ), input_file: str | None = typer.Option( None, "--input-file", help="Load input values from a JSON object file" From 042dadc0f0b13e7cee97d0beac564a54e470c186 Mon Sep 17 00:00:00 2001 From: Adrian Osorio Blanchard Date: Mon, 4 May 2026 11:06:04 -0400 Subject: [PATCH 05/26] Address workflow file input review feedback --- docs/reference/workflows.md | 4 +- src/specify_cli/__init__.py | 61 ++++++++++++++++++++- tests/test_workflows.py | 105 ++++++++++++++++++++++++++++++++++++ 3 files changed, 167 insertions(+), 3 deletions(-) diff --git a/docs/reference/workflows.md b/docs/reference/workflows.md index e350aecdb0..2b02ff3e1b 100644 --- a/docs/reference/workflows.md +++ b/docs/reference/workflows.md @@ -11,7 +11,7 @@ specify workflow run | Option | Description | | ------------------- | ------------------------------------------------------------------------------------------------ | | `-i` / `--input` | Pass workflow inputs/parameters as `key=value` (repeatable); `key=@path` reads an existing text file, otherwise `@` values stay literal | -| `--input-file` | Load workflow inputs/parameters from a JSON object file; repeatable `--input` values override file values | +| `--input-file` | Load workflow inputs/parameters from a JSON object file with string, number, or boolean values; repeatable `--input` values override file values | Runs a workflow from an installed workflow ID or a local `.yml`/`.yaml` file path. 
Inputs/parameters declared by the workflow can be provided via `--input` or `--input-file`, or will be prompted interactively. @@ -23,6 +23,8 @@ specify workflow run ./workflow.yml --input prompt=@docs/prompt.md specify workflow run ./workflow.yml --input-file payload.json -i scope=full ``` +For boolean, number, and enum-constrained inputs, surrounding whitespace from file-backed string values is trimmed before normal workflow input coercion. Free-form string inputs preserve file contents. + > **Note:** All workflow commands require a project already initialized with `specify init`. ## Resume a Workflow diff --git a/src/specify_cli/__init__.py b/src/specify_cli/__init__.py index 424990d9fd..78f18cac33 100644 --- a/src/specify_cli/__init__.py +++ b/src/specify_cli/__init__.py @@ -34,6 +34,7 @@ import shutil import json import json5 +import math import stat import shlex import urllib.error @@ -5289,6 +5290,36 @@ def _read_workflow_cli_file(raw_path: str, description: str) -> tuple[Path, str] ) from exc +def _json_type_name(value: Any) -> str: + """Return a user-facing JSON type name for validation errors.""" + if value is None: + return "null" + if isinstance(value, dict): + return "object" + if isinstance(value, list): + return "array" + if isinstance(value, bool): + return "boolean" + if isinstance(value, (int, float)): + return "number" + if isinstance(value, str): + return "string" + return type(value).__name__ + + +def _validate_workflow_input_file_value(key: str, value: Any) -> None: + """Ensure --input-file values match the supported workflow input scalars.""" + if isinstance(value, float) and not math.isfinite(value): + raise ValueError( + f"--input-file value for {key!r} must be a finite number." + ) + if not isinstance(value, (str, int, float, bool)): + raise ValueError( + f"--input-file value for {key!r} must be a string, number, " + f"or boolean, got {_json_type_name(value)}." 
+ ) + + def _load_workflow_input_file(input_file: str) -> dict[str, Any]: """Load workflow inputs from a JSON object file.""" path, raw_json = _read_workflow_cli_file(input_file, "--input-file") @@ -5304,18 +5335,40 @@ def _load_workflow_input_file(input_file: str) -> dict[str, Any]: raise ValueError( f"--input-file must contain a JSON object, got {type(data).__name__}." ) + for key, value in data.items(): + _validate_workflow_input_file_value(str(key), value) return data +def _normalize_workflow_cli_scalar( + value: Any, + input_def: dict[str, Any] | None, +) -> Any: + """Normalize file-backed scalars when workflow coercion expects scalars.""" + if not isinstance(value, str) or not isinstance(input_def, dict): + return value + + input_type = input_def.get("type", "string") + if input_type in ("number", "boolean") or input_def.get("enum") is not None: + return value.strip() + return value + + def _parse_workflow_inputs( input_values: list[str] | None, input_file: str | None, + input_definitions: dict[str, Any] | None = None, ) -> dict[str, Any]: """Normalize workflow CLI input options into the engine input dict.""" inputs: dict[str, Any] = {} + input_definitions = input_definitions or {} if input_file is not None: - inputs.update(_load_workflow_input_file(input_file)) + for key, value in _load_workflow_input_file(input_file).items(): + inputs[key] = _normalize_workflow_cli_scalar( + value, + input_definitions.get(key), + ) if input_values: for kv in input_values: @@ -5339,6 +5392,10 @@ def _parse_workflow_inputs( _, value = _read_workflow_cli_file( file_ref, f"input {key!r}" ) + value = _normalize_workflow_cli_scalar( + value, + input_definitions.get(key), + ) inputs[key] = value return inputs @@ -5385,7 +5442,7 @@ def workflow_run( raise typer.Exit(1) try: - inputs = _parse_workflow_inputs(input_values, input_file) + inputs = _parse_workflow_inputs(input_values, input_file, definition.inputs) except ValueError as exc: console.print(f"[red]Error:[/red] {exc}") 
raise typer.Exit(1) diff --git a/tests/test_workflows.py b/tests/test_workflows.py index 58db0ba508..1128aacf36 100644 --- a/tests/test_workflows.py +++ b/tests/test_workflows.py @@ -120,6 +120,34 @@ def test_at_file_input_reads_file_contents_for_generic_key( assert inputs == {"description": desc_text} + def test_at_file_input_normalizes_typed_scalars( + self, + project_dir, + monkeypatch, + ): + from specify_cli import _parse_workflow_inputs + + (project_dir / "enabled.txt").write_text("true\n", encoding="utf-8") + (project_dir / "scope.txt").write_text("full\n", encoding="utf-8") + (project_dir / "notes.md").write_text("line one\n", encoding="utf-8") + monkeypatch.chdir(project_dir) + + inputs = _parse_workflow_inputs( + ["enabled=@enabled.txt", "scope=@scope.txt", "notes=@notes.md"], + None, + { + "enabled": {"type": "boolean"}, + "scope": {"type": "string", "enum": ["full", "minimal"]}, + "notes": {"type": "string"}, + }, + ) + + assert inputs == { + "enabled": "true", + "scope": "full", + "notes": "line one\n", + } + @pytest.mark.parametrize("literal", ["@alice", "@"]) def test_missing_at_file_stays_literal(self, literal, project_dir, monkeypatch): from specify_cli import _parse_workflow_inputs @@ -176,6 +204,40 @@ def test_input_file_loads_json_object(self, project_dir, monkeypatch): "scope": "full", } + def test_input_file_normalizes_typed_string_scalars( + self, + project_dir, + monkeypatch, + ): + from specify_cli import _parse_workflow_inputs + + payload_file = project_dir / "payload.json" + payload_file.write_text( + json.dumps({ + "enabled": "true\n", + "scope": "full\n", + "prompt": "Keep trailing newline\n", + }), + encoding="utf-8", + ) + monkeypatch.chdir(project_dir) + + inputs = _parse_workflow_inputs( + None, + "payload.json", + { + "enabled": {"type": "boolean"}, + "scope": {"type": "string", "enum": ["full", "minimal"]}, + "prompt": {"type": "string"}, + }, + ) + + assert inputs == { + "enabled": "true", + "scope": "full", + "prompt": "Keep 
trailing newline\n", + } + def test_direct_input_overrides_input_file(self, project_dir, monkeypatch): from specify_cli import _parse_workflow_inputs @@ -219,6 +281,48 @@ def test_non_object_json_input_file_fails_cleanly( with pytest.raises(ValueError, match="JSON object"): _parse_workflow_inputs(None, "payload.json") + @pytest.mark.parametrize( + "payload", + [ + {"spec": {"text": "Build a workflow"}}, + {"spec": ["Build a workflow"]}, + {"spec": None}, + ], + ) + def test_non_scalar_json_input_file_values_fail_cleanly( + self, + payload, + project_dir, + monkeypatch, + ): + from specify_cli import _parse_workflow_inputs + + payload_file = project_dir / "payload.json" + payload_file.write_text(json.dumps(payload), encoding="utf-8") + monkeypatch.chdir(project_dir) + + with pytest.raises(ValueError, match="string, number, or boolean"): + _parse_workflow_inputs(None, "payload.json") + + @pytest.mark.parametrize( + "payload", + ['{"spec": NaN}', '{"spec": Infinity}', '{"spec": 1e999}'], + ) + def test_non_finite_json_input_file_numbers_fail_cleanly( + self, + payload, + project_dir, + monkeypatch, + ): + from specify_cli import _parse_workflow_inputs + + payload_file = project_dir / "payload.json" + payload_file.write_text(payload, encoding="utf-8") + monkeypatch.chdir(project_dir) + + with pytest.raises(ValueError, match="finite number"): + _parse_workflow_inputs(None, "payload.json") + def test_malformed_inline_input_fails_cleanly(self): from specify_cli import _parse_workflow_inputs @@ -245,6 +349,7 @@ class FakeDefinition: id = "speckit" name = "Spec Kit" version = "1.0.0" + inputs = {} class FakeStatus: value = "completed" From 521b0d9ef76f01b466508d76bdd86ce8a43ee48f Mon Sep 17 00:00:00 2001 From: Dyan Galih Date: Mon, 4 May 2026 22:07:58 +0700 Subject: [PATCH 06/26] update security-review and memory-md extensions to latest versions (#2445) * chore: update security-review extension to v1.4.2 * chore: update memory-md description and catalog updated_at --- 
README.md | 2 +- extensions/catalog.community.json | 27 ++++++++++++++------------- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index 102c55ecb0..801e962549 100644 --- a/README.md +++ b/README.md @@ -230,7 +230,7 @@ The following community-contributed extensions are available in [`catalog.commun | MAQA Trello Integration | Trello board integration for MAQA — populates board from specs, moves cards, real-time checklist ticking | `integration` | Read+Write | [spec-kit-maqa-trello](https://github.com/GenieRobot/spec-kit-maqa-trello) | | MarkItDown Document Converter | Convert documents (PDF, Word, PowerPoint, Excel, and more) to Markdown for use as spec reference material | `docs` | Read+Write | [spec-kit-markitdown](https://github.com/BenBtg/spec-kit-markitdown) | | Memory Loader | Loads .specify/memory/ files before lifecycle commands so LLM agents have project governance context | `docs` | Read-only | [spec-kit-memory-loader](https://github.com/KevinBrown5280/spec-kit-memory-loader) | -| Memory MD | Repository-native durable memory for Spec Kit projects | `docs` | Read+Write | [spec-kit-memory-hub](https://github.com/DyanGalih/spec-kit-memory-hub) | +| Memory MD | Spec Kit extension for repository-native Markdown memory that captures durable decisions, bugs, and project context | `docs` | Read+Write | [spec-kit-memory-hub](https://github.com/DyanGalih/spec-kit-memory-hub) | | MemoryLint | Agent memory governance tool: Automatically audits and fixes boundary conflicts between AGENTS.md and the constitution. 
| `process` | Read+Write | [memorylint](https://github.com/RbBtSn0w/spec-kit-extensions/tree/main/memorylint) | | Microsoft 365 Integration | Fetch Teams messages, meeting transcripts, and SharePoint/OneDrive files as local Markdown for spec generation | `integration` | Read+Write | [spec-kit-m365](https://github.com/BenBtg/spec-kit-m365) | | Onboard | Contextual onboarding and progressive growth for developers new to spec-kit projects. Explains specs, maps dependencies, validates understanding, and guides the next step | `process` | Read+Write | [spec-kit-onboard](https://github.com/dmux/spec-kit-onboard) | diff --git a/extensions/catalog.community.json b/extensions/catalog.community.json index a952e8b9cc..c8361286cf 100644 --- a/extensions/catalog.community.json +++ b/extensions/catalog.community.json @@ -1,6 +1,6 @@ { "schema_version": "1.0", - "updated_at": "2026-05-01T15:01:47Z", + "updated_at": "2026-05-03T00:00:00Z", "catalog_url": "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.community.json", "extensions": { "aide": { @@ -1278,20 +1278,20 @@ "memory-md": { "name": "Memory MD", "id": "memory-md", - "description": "Repository-native durable memory for Spec Kit projects", + "description": "Spec Kit extension for repository-native Markdown memory that captures durable decisions, bugs, and project context", "author": "DyanGalih", - "version": "0.6.9", - "download_url": "https://github.com/DyanGalih/spec-kit-memory-hub/archive/refs/tags/v0.6.9.zip", + "version": "0.7.5", + "download_url": "https://github.com/DyanGalih/spec-kit-memory-hub/archive/refs/tags/v0.7.5.zip", "repository": "https://github.com/DyanGalih/spec-kit-memory-hub", "homepage": "https://github.com/DyanGalih/spec-kit-memory-hub", "documentation": "https://github.com/DyanGalih/spec-kit-memory-hub/blob/main/README.md", - "changelog": "https://github.com/DyanGalih/spec-kit-memory-hub/blob/main/docs/memory-workflow-v0.6.md", + "changelog": 
"https://github.com/DyanGalih/spec-kit-memory-hub/blob/main/CHANGELOG.md", "license": "MIT", "requires": { - "speckit_version": ">=0.6.0" + "speckit_version": ">=0.2.0" }, "provides": { - "commands": 5, + "commands": 6, "hooks": 0 }, "tags": [ @@ -1299,13 +1299,14 @@ "workflow", "docs", "copilot", - "markdown" + "markdown", + "ai-context" ], "verified": false, "downloads": 0, "stars": 0, "created_at": "2026-04-23T00:00:00Z", - "updated_at": "2026-05-01T14:48:00Z" + "updated_at": "2026-05-03T00:00:00Z" }, "memorylint": { "name": "MemoryLint", @@ -1931,8 +1932,8 @@ "id": "security-review", "description": "Full-project secure-by-design security audits plus staged, branch/PR, plan, task, follow-up, and apply reviews", "author": "DyanGalih", - "version": "1.3.3", - "download_url": "https://github.com/DyanGalih/spec-kit-security-review/archive/refs/tags/v1.3.3.zip", + "version": "1.4.2", + "download_url": "https://github.com/DyanGalih/spec-kit-security-review/archive/refs/tags/v1.4.2.zip", "repository": "https://github.com/DyanGalih/spec-kit-security-review", "homepage": "https://github.com/DyanGalih/spec-kit-security-review", "documentation": "https://github.com/DyanGalih/spec-kit-security-review/blob/main/README.md", @@ -1942,7 +1943,7 @@ "speckit_version": ">=0.1.0" }, "provides": { - "commands": 6, + "commands": 7, "hooks": 0 }, "tags": [ @@ -1956,7 +1957,7 @@ "downloads": 0, "stars": 0, "created_at": "2026-04-03T03:24:03Z", - "updated_at": "2026-05-01T14:48:00Z" + "updated_at": "2026-05-03T00:00:00Z" }, "sf": { "name": "SFSpeckit — Salesforce Spec-Driven Development", From 05d9aa3e90f1e9c6afd7b7a35b01021abb4dabba Mon Sep 17 00:00:00 2001 From: Alex Vieira Date: Mon, 4 May 2026 17:35:18 +0100 Subject: [PATCH 07/26] feat(presets): add Spec2Cloud preset for Azure deployment workflow (#2413) * feat(presets): add Spec2Cloud preset for Azure deployment workflow Co-authored-by: Copilot * feat(presets): add Spec2Cloud preset details to community catalog * fix(presets): 
update Spec2Cloud URL to point to the correct GitHub repository * feat(presets): update Spec2Cloud entry with created_at and updated_at timestamps * feat(presets): update Spec2Cloud version to 1.1.0 and adjust timestamps * Potential fix for pull request finding Co-authored-by: Copilot Autofix powered by AI <175728472+Copilot@users.noreply.github.com> * fix: update spec2cloud preset details and resolve merge conflicts * fix: reorder Spec2Cloud entry in community presets for consistency --------- Co-authored-by: Copilot Co-authored-by: Copilot Autofix powered by AI <175728472+Copilot@users.noreply.github.com> --- docs/community/presets.md | 1 + presets/catalog.community.json | 29 ++++++++++++++++++++++++++++- 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/docs/community/presets.md b/docs/community/presets.md index 15f2b7c9ff..b1eaffd318 100644 --- a/docs/community/presets.md +++ b/docs/community/presets.md @@ -21,6 +21,7 @@ The following community-contributed presets customize how Spec Kit behaves — o | Pirate Speak (Full) | Transforms all Spec Kit output into pirate speak — specs become "Voyage Manifests", plans become "Battle Plans", tasks become "Crew Assignments" | 6 templates, 9 commands | — | [spec-kit-presets](https://github.com/mnriem/spec-kit-presets) | | Screenwriting | Spec-Driven Development for screenwriting/scriptwriting/tutorials: feature films, television (pilot, episode, limited series), and stage plays. Adapts the Spec Kit workflow to screenplay craft — slug lines, action lines, act breaks, beat sheets, and industry-standard pitch documents. Supports three-act, Save the Cat, TV pilot, network episode, cable/streaming episode, and stage-play structural frameworks. 
Export to Fountain, FTX, PDF | 26 templates, 32 commands, 1 script | — | [speckit-preset-screenwriting](https://github.com/adaumann/speckit-preset-screenwriting) | | Security Governance | Adds secure development governance: memory-safe-language preference, secure code generation, NIST SSDF, CWE Top 25, OWASP ASVS, SBOM/VEX/SLSA, OpenSSF Scorecard, and EU CRA applicability | 12 templates, 3 commands | — | [spec-kit-preset-security-governance](https://github.com/hindermath/spec-kit-preset-security-governance) | +| Spec2Cloud | Spec-driven workflow tuned for shipping to Azure: spec → plan → tasks → implement → deploy | 5 templates, 8 commands | — | [spec2cloud](https://github.com/Azure-Samples/Spec2Cloud) | | Table of Contents Navigation | Adds a navigable Table of Contents to generated spec.md, plan.md, and tasks.md documents | 3 templates, 3 commands | — | [spec-kit-preset-toc-navigation](https://github.com/Quratulain-bilal/spec-kit-preset-toc-navigation) | | VS Code Ask Questions | Enhances the clarify command to use `vscode/askQuestions` for batched interactive questioning. 
| 1 command | — | [spec-kit-presets](https://github.com/fdcastel/spec-kit-presets) | diff --git a/presets/catalog.community.json b/presets/catalog.community.json index 8064bfc960..7791365fef 100644 --- a/presets/catalog.community.json +++ b/presets/catalog.community.json @@ -1,6 +1,6 @@ { "schema_version": "1.0", - "updated_at": "2026-04-27T00:00:00Z", + "updated_at": "2026-05-05T10:00:00Z", "catalog_url": "https://raw.githubusercontent.com/github/spec-kit/main/presets/catalog.community.json", "presets": { "a11y-governance": { @@ -401,6 +401,33 @@ "created_at": "2026-04-27T00:00:00Z", "updated_at": "2026-04-27T00:00:00Z" }, + "spec2cloud": { + "name": "Spec2Cloud", + "id": "spec2cloud", + "version": "1.1.0", + "description": "Spec-driven workflow tuned for shipping to Azure: spec → plan → tasks → implement → deploy.", + "author": "Azure Samples", + "repository": "https://github.com/Azure-Samples/Spec2Cloud", + "download_url": "https://github.com/Azure-Samples/Spec2Cloud/releases/download/spec-kit-spec2cloud-v1.1.0/preset.zip", + "homepage": "https://aka.ms/spec2cloud", + "documentation": "https://github.com/Azure-Samples/Spec2Cloud/blob/main/spec-kit/README.md", + "license": "MIT", + "requires": { + "speckit_version": ">=0.1.0" + }, + "provides": { + "templates": 5, + "commands": 8 + }, + "tags": [ + "azure", + "spec2cloud", + "workflow", + "deployment" + ], + "created_at": "2026-04-30T00:00:00Z", + "updated_at": "2026-04-30T00:00:00Z" + }, "toc-navigation": { "name": "Table of Contents Navigation", "id": "toc-navigation", From f47c2eb468080bbda5638983d668b02731ac8a6c Mon Sep 17 00:00:00 2001 From: Manfred Riem <15701806+mnriem@users.noreply.github.com> Date: Mon, 4 May 2026 11:39:08 -0500 Subject: [PATCH 08/26] chore: release 0.8.5, begin 0.8.6.dev0 development (#2447) * chore: bump version to 0.8.5 * chore: begin 0.8.6.dev0 development --------- Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- CHANGELOG.md | 15 
+++++++++++++++ pyproject.toml | 2 +- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 48db19ddf4..b15c06dbe2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,21 @@ +## [0.8.5] - 2026-05-04 + +### Changed + +- feat(presets): add Spec2Cloud preset for Azure deployment workflow (#2413) +- update security-review and memory-md extensions to latest versions (#2445) +- fix: honor template overrides for tasks-template (#2278) (#2292) +- Add token-analyzer to community catalog (#2433) +- docs: add April 2026 newsletter (#2434) +- feat: emit init-time notice for git extension default change (#2165) (#2432) +- Update DyanGalih(Memory Hub and Security Review) community extensions (#2429) +- Support controlled multi-install for safe AI agent integrations (#2389) +- chore(integrations): clean up docs and project guard (#2428) +- chore: release 0.8.4, begin 0.8.5.dev0 development (#2431) + ## [0.8.4] - 2026-05-01 ### Changed diff --git a/pyproject.toml b/pyproject.toml index 98920d8549..dd2e597e95 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "specify-cli" -version = "0.8.5.dev0" +version = "0.8.6.dev0" description = "Specify CLI, part of GitHub Spec Kit. A tool to bootstrap your projects for Spec-Driven Development (SDD)." 
requires-python = ">=3.11" dependencies = [ From 1994bd766ea2a3b1d9d87dcec18abc9410f39834 Mon Sep 17 00:00:00 2001 From: Thorsten Hindermann Date: Mon, 4 May 2026 18:43:42 +0200 Subject: [PATCH 09/26] Add agent-parity-governance to community catalog (#2382) --- docs/community/presets.md | 1 + presets/catalog.community.json | 28 ++++++++++++++++++++++++++++ 2 files changed, 29 insertions(+) diff --git a/docs/community/presets.md b/docs/community/presets.md index b1eaffd318..e622875ef2 100644 --- a/docs/community/presets.md +++ b/docs/community/presets.md @@ -8,6 +8,7 @@ The following community-contributed presets customize how Spec Kit behaves — o | Preset | Purpose | Provides | Requires | URL | |--------|---------|----------|----------|-----| | A11Y Governance | Adds WCAG 2.2 AA accessibility checks, bilingual DE/EN delivery, CEFR-B2 readability, CLI accessibility, and inclusive-content guidance | 9 templates, 3 commands | — | [spec-kit-preset-a11y-governance](https://github.com/hindermath/spec-kit-preset-a11y-governance) | +| Agent Parity Governance | Keeps shared AI-agent instructions aligned across project-defined agent guidance surfaces and documents intentional deviations | 6 templates, 3 commands | — | [spec-kit-preset-agent-parity-governance](https://github.com/hindermath/spec-kit-preset-agent-parity-governance) | | AIDE In-Place Migration | Adapts the AIDE extension workflow for in-place technology migrations (X → Y pattern) — adds migration objectives, verification gates, knowledge documents, and behavioral equivalence criteria | 2 templates, 8 commands | AIDE extension | [spec-kit-presets](https://github.com/mnriem/spec-kit-presets) | | Architecture Governance | Adds secure architecture governance: trust boundaries, threat modeling, STRIDE/CAPEC, S-ADRs, Zero Trust applicability, and OWASP SAMM | 11 templates, 3 commands | — | [spec-kit-preset-architecture-governance](https://github.com/hindermath/spec-kit-preset-architecture-governance) | | Canon Core | 
Adapts original Spec Kit workflow to work together with Canon extension | 2 templates, 8 commands | — | [spec-kit-canon](https://github.com/maximiliamus/spec-kit-canon) | diff --git a/presets/catalog.community.json b/presets/catalog.community.json index 7791365fef..bf9725e625 100644 --- a/presets/catalog.community.json +++ b/presets/catalog.community.json @@ -31,6 +31,34 @@ "created_at": "2026-04-27T00:00:00Z", "updated_at": "2026-04-27T00:00:00Z" }, + "agent-parity-governance": { + "name": "Agent Parity Governance", + "id": "agent-parity-governance", + "version": "0.1.0", + "description": "Keeps shared AI-agent guidance aligned across a project-defined set of agent instruction surfaces.", + "author": "Thorsten Hindermann", + "repository": "https://github.com/hindermath/spec-kit-preset-agent-parity-governance", + "download_url": "https://github.com/hindermath/spec-kit-preset-agent-parity-governance/archive/refs/tags/v0.1.0.zip", + "homepage": "https://github.com/hindermath/spec-kit-preset-agent-parity-governance", + "documentation": "https://github.com/hindermath/spec-kit-preset-agent-parity-governance/blob/main/README.md", + "license": "MIT", + "requires": { + "speckit_version": ">=0.8.0" + }, + "provides": { + "templates": 6, + "commands": 3 + }, + "tags": [ + "agents", + "governance", + "parity", + "agent-guidance", + "multi-agent" + ], + "created_at": "2026-04-27T00:00:00Z", + "updated_at": "2026-04-27T00:00:00Z" + }, "aide-in-place": { "name": "AIDE In-Place Migration", "id": "aide-in-place", From a7201c183e9cd628ff990dbb85dfbb7250662cda Mon Sep 17 00:00:00 2001 From: Pascal THUET Date: Mon, 4 May 2026 19:00:28 +0200 Subject: [PATCH 10/26] fix(workflows): require project for catalog list (#2436) --- src/specify_cli/__init__.py | 2 +- tests/integrations/test_cli.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/src/specify_cli/__init__.py b/src/specify_cli/__init__.py index ccd670d20e..176eecc2d4 100644 --- a/src/specify_cli/__init__.py +++ 
b/src/specify_cli/__init__.py @@ -5806,7 +5806,7 @@ def workflow_catalog_list(): """List configured workflow catalog sources.""" from .workflows.catalog import WorkflowCatalog, WorkflowCatalogError - project_root = Path.cwd() + project_root = _require_specify_project() catalog = WorkflowCatalog(project_root) try: diff --git a/tests/integrations/test_cli.py b/tests/integrations/test_cli.py index 7732d57300..b94f9cc9fd 100644 --- a/tests/integrations/test_cli.py +++ b/tests/integrations/test_cli.py @@ -1117,6 +1117,7 @@ def test_project_scoped_commands_require_specify_directory(self, tmp_path): ["workflow", "remove", "demo"], ["workflow", "search"], ["workflow", "info", "demo"], + ["workflow", "catalog", "list"], ["workflow", "catalog", "add", "https://example.com/catalog.yml"], ["workflow", "catalog", "remove", "0"], ] From 09f7657f5b880fca0fbbfceb5f3759a3bfd64a7e Mon Sep 17 00:00:00 2001 From: Pascal THUET Date: Mon, 4 May 2026 21:08:07 +0200 Subject: [PATCH 11/26] Pin GitHub Actions by SHA (#2441) --- .github/workflows/codeql.yml | 6 +++--- .github/workflows/docs.yml | 11 +++++------ .github/workflows/lint.yml | 2 +- .github/workflows/release-trigger.yml | 2 +- .github/workflows/release.yml | 3 +-- .github/workflows/stale.yml | 2 +- .github/workflows/test.yml | 8 ++++---- 7 files changed, 16 insertions(+), 18 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 01e0df4a51..1af463c718 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -19,14 +19,14 @@ jobs: language: [ 'actions', 'python' ] steps: - name: Checkout repository - uses: actions/checkout@v4 + uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 - name: Initialize CodeQL - uses: github/codeql-action/init@v4 + uses: github/codeql-action/init@e46ed2cbd01164d986452f91f178727624ae40d7 # v4 with: languages: ${{ matrix.language }} - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v4 + uses: 
github/codeql-action/analyze@e46ed2cbd01164d986452f91f178727624ae40d7 # v4 with: category: "/language:${{ matrix.language }}" diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 6fe87ddce2..9cb48f8f38 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -30,12 +30,12 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v6 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 with: fetch-depth: 0 # Fetch all history for git info - name: Setup .NET - uses: actions/setup-dotnet@v4 + uses: actions/setup-dotnet@67a3573c9a986a3f9c594539f4ab511d57bb3ce9 # v4 with: dotnet-version: '8.x' @@ -48,10 +48,10 @@ jobs: docfx docfx.json - name: Setup Pages - uses: actions/configure-pages@v6 + uses: actions/configure-pages@45bfe0192ca1faeb007ade9deae92b16b8254a0d # v6 - name: Upload artifact - uses: actions/upload-pages-artifact@v5 + uses: actions/upload-pages-artifact@fc324d3547104276b827a68afc52ff2a11cc49c9 # v5 with: path: 'docs/_site' @@ -66,5 +66,4 @@ jobs: steps: - name: Deploy to GitHub Pages id: deployment - uses: actions/deploy-pages@v5 - + uses: actions/deploy-pages@cd2ce8fcbc39b97be8ca5fce6e763baed58fa128 # v5 diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 8b11ccdfff..3b2ad70bfb 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v6 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 - name: Run markdownlint-cli2 uses: DavidAnson/markdownlint-cli2-action@6b51ade7a9e4a75a7ad929842dd298a3804ebe8b # v23 diff --git a/.github/workflows/release-trigger.yml b/.github/workflows/release-trigger.yml index a451accfe6..c3728e2363 100644 --- a/.github/workflows/release-trigger.yml +++ b/.github/workflows/release-trigger.yml @@ -16,7 +16,7 @@ jobs: pull-requests: write steps: - name: Checkout repository - uses: actions/checkout@v6 + 
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 with: fetch-depth: 0 token: ${{ secrets.RELEASE_PAT }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 7b903cf979..9437bd02e7 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -12,7 +12,7 @@ jobs: contents: write steps: - name: Checkout repository - uses: actions/checkout@v6 + uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6 with: fetch-depth: 0 token: ${{ secrets.GITHUB_TOKEN }} @@ -86,4 +86,3 @@ jobs: --notes-file release_notes.md env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 076d05336a..919add00f0 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -14,7 +14,7 @@ jobs: stale: runs-on: ubuntu-latest steps: - - uses: actions/stale@v10 + - uses: actions/stale@b5d41d4e1d5dceea10e7104786b73624c18a190f # v10 with: # Days of inactivity before an issue or PR becomes stale days-before-stale: 150 diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 7354dd8e28..f7130aa8d1 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -13,13 +13,13 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 - name: Install uv uses: astral-sh/setup-uv@08807647e7069bb48b6ef5acd8ec9567f424441b # v8.1.0 - name: Set up Python - uses: actions/setup-python@v6 + uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6 with: python-version: "3.13" @@ -34,13 +34,13 @@ jobs: python-version: ["3.11", "3.12", "3.13"] steps: - name: Checkout - uses: actions/checkout@v4 + uses: actions/checkout@34e114876b0b11c390a56381ad16ebd13914f8d5 # v4 - name: Install uv uses: astral-sh/setup-uv@08807647e7069bb48b6ef5acd8ec9567f424441b # v8.1.0 - name: Set up Python ${{ matrix.python-version }} - uses: 
actions/setup-python@v6 + uses: actions/setup-python@a309ff8b426b58ec0e2a45f0f869d46889d02405 # v6 with: python-version: ${{ matrix.python-version }} From 4a8f19cc63450effa4952b9574c255f3671e35d8 Mon Sep 17 00:00:00 2001 From: Ben Lawson Date: Mon, 4 May 2026 16:50:12 -0400 Subject: [PATCH 12/26] Update Ralph Loop to v1.0.2 (#2435) --- README.md | 2 +- extensions/catalog.community.json | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 801e962549..01bdbfbe17 100644 --- a/README.md +++ b/README.md @@ -243,7 +243,7 @@ The following community-contributed extensions are available in [`catalog.commun | Project Health Check | Diagnose a Spec Kit project and report health issues across structure, agents, features, scripts, extensions, and git | `visibility` | Read-only | [spec-kit-doctor](https://github.com/KhawarHabibKhan/spec-kit-doctor) | | Project Status | Show current SDD workflow progress — active feature, artifact status, task completion, workflow phase, and extensions summary | `visibility` | Read-only | [spec-kit-status](https://github.com/KhawarHabibKhan/spec-kit-status) | | QA Testing Extension | Systematic QA testing with browser-driven or CLI-based validation of acceptance criteria from spec | `code` | Read-only | [spec-kit-qa](https://github.com/arunt14/spec-kit-qa) | -| Ralph Loop | Autonomous implementation loop using AI agent CLI | `code` | Read+Write | [spec-kit-ralph](https://github.com/Rubiss/spec-kit-ralph) | +| Ralph Loop | Autonomous implementation loop using AI agent CLI | `code` | Read+Write | [spec-kit-ralph](https://github.com/Rubiss-Projects/spec-kit-ralph) | | Reconcile Extension | Reconcile implementation drift by surgically updating feature artifacts. 
| `docs` | Read+Write | [spec-kit-reconcile](https://github.com/stn1slv/spec-kit-reconcile) | | Red Team | Adversarial review of specs before /speckit.plan — parallel lens agents surface risks that clarify/analyze structurally can't (prompt injection, integrity gaps, cross-spec drift, silent failures). Produces a structured findings report; no auto-edits to specs. | `docs` | Read+Write | [spec-kit-red-team](https://github.com/ashbrener/spec-kit-red-team) | | Repository Index | Generate index for existing repo for overview, architecture and module level. | `docs` | Read-only | [spec-kit-repoindex](https://github.com/liuyiyu/spec-kit-repoindex) | diff --git a/extensions/catalog.community.json b/extensions/catalog.community.json index c8361286cf..d00643bac5 100644 --- a/extensions/catalog.community.json +++ b/extensions/catalog.community.json @@ -1,6 +1,6 @@ { "schema_version": "1.0", - "updated_at": "2026-05-03T00:00:00Z", + "updated_at": "2026-05-04T17:02:08Z", "catalog_url": "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.community.json", "extensions": { "aide": { @@ -1598,12 +1598,12 @@ "id": "ralph", "description": "Autonomous implementation loop using AI agent CLI.", "author": "Rubiss", - "version": "1.0.1", - "download_url": "https://github.com/Rubiss/spec-kit-ralph/archive/refs/tags/v1.0.1.zip", - "repository": "https://github.com/Rubiss/spec-kit-ralph", - "homepage": "https://github.com/Rubiss/spec-kit-ralph", - "documentation": "https://github.com/Rubiss/spec-kit-ralph/blob/main/README.md", - "changelog": "https://github.com/Rubiss/spec-kit-ralph/blob/main/CHANGELOG.md", + "version": "1.0.2", + "download_url": "https://github.com/Rubiss-Projects/spec-kit-ralph/archive/refs/tags/v1.0.2.zip", + "repository": "https://github.com/Rubiss-Projects/spec-kit-ralph", + "homepage": "https://github.com/Rubiss-Projects/spec-kit-ralph", + "documentation": "https://github.com/Rubiss-Projects/spec-kit-ralph/blob/main/README.md", + "changelog": 
"https://github.com/Rubiss-Projects/spec-kit-ralph/blob/main/CHANGELOG.md", "license": "MIT", "requires": { "speckit_version": ">=0.1.0", @@ -1632,7 +1632,7 @@ "downloads": 0, "stars": 0, "created_at": "2026-03-09T00:00:00Z", - "updated_at": "2026-04-12T19:00:00Z" + "updated_at": "2026-05-04T17:02:08Z" }, "reconcile": { "name": "Reconcile Extension", From 0d8685aa80e2c1e9c18f83f661e23e2c7c64250a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=EB=82=AE=ED=95=B4=EB=8B=AC=EB=B0=A4?= <5061546+formin@users.noreply.github.com> Date: Tue, 5 May 2026 07:14:31 +0900 Subject: [PATCH 13/26] Add multi-model-review extension to community catalog (#2446) Co-authored-by: formin --- README.md | 1 + extensions/catalog.community.json | 50 +++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+) diff --git a/README.md b/README.md index 01bdbfbe17..f730e10b88 100644 --- a/README.md +++ b/README.md @@ -233,6 +233,7 @@ The following community-contributed extensions are available in [`catalog.commun | Memory MD | Spec Kit extension for repository-native Markdown memory that captures durable decisions, bugs, and project context | `docs` | Read+Write | [spec-kit-memory-hub](https://github.com/DyanGalih/spec-kit-memory-hub) | | MemoryLint | Agent memory governance tool: Automatically audits and fixes boundary conflicts between AGENTS.md and the constitution. | `process` | Read+Write | [memorylint](https://github.com/RbBtSn0w/spec-kit-extensions/tree/main/memorylint) | | Microsoft 365 Integration | Fetch Teams messages, meeting transcripts, and SharePoint/OneDrive files as local Markdown for spec generation | `integration` | Read+Write | [spec-kit-m365](https://github.com/BenBtg/spec-kit-m365) | +| Multi-Model Review | Cross-model Spec Kit handoffs for spec authoring, implementation routing, and review. 
| `process` | Read+Write | [multi-model-review](https://github.com/formin/multi-model-review) | | Onboard | Contextual onboarding and progressive growth for developers new to spec-kit projects. Explains specs, maps dependencies, validates understanding, and guides the next step | `process` | Read+Write | [spec-kit-onboard](https://github.com/dmux/spec-kit-onboard) | | Optimize | Audit and optimize AI governance for context efficiency — token budgets, rule health, interpretability, compression, coherence, and echo detection | `process` | Read+Write | [spec-kit-optimize](https://github.com/sakitA/spec-kit-optimize) | | OWASP LLM Threat Model | OWASP Top 10 for LLM Applications 2025 threat analysis on agent artifacts | `code` | Read-only | [spec-kit-threatmodel](https://github.com/NaviaSamal/spec-kit-threatmodel) | diff --git a/extensions/catalog.community.json b/extensions/catalog.community.json index d00643bac5..1de30036d3 100644 --- a/extensions/catalog.community.json +++ b/extensions/catalog.community.json @@ -1340,6 +1340,56 @@ "created_at": "2026-04-09T00:00:00Z", "updated_at": "2026-04-16T13:10:26Z" }, + "multi-model-review": { + "name": "Multi-Model Review", + "id": "multi-model-review", + "description": "Cross-model Spec Kit handoffs for spec authoring, implementation routing, and review.", + "author": "formin", + "version": "0.1.0", + "download_url": "https://github.com/formin/multi-model-review/archive/refs/tags/v0.1.0.zip", + "repository": "https://github.com/formin/multi-model-review", + "homepage": "https://github.com/formin/multi-model-review", + "documentation": "https://github.com/formin/multi-model-review/blob/main/README.md", + "changelog": "https://github.com/formin/multi-model-review/blob/main/CHANGELOG.md", + "license": "MIT", + "requires": { + "speckit_version": ">=0.2.0", + "tools": [ + { + "name": "git", + "required": true + }, + { + "name": "codex", + "required": false + }, + { + "name": "gemini", + "required": false + }, + { + "name": 
"claude", + "required": false + } + ] + }, + "provides": { + "commands": 4, + "hooks": 0 + }, + "tags": [ + "review", + "workflow", + "multi-model", + "spec-driven-development", + "code" + ], + "verified": false, + "downloads": 0, + "stars": 0, + "created_at": "2026-05-04T02:51:52Z", + "updated_at": "2026-05-04T02:51:52Z" + }, "onboard": { "name": "Onboard", "id": "onboard", From 10f63c914d4390410fa8ce6bf917e765f76e4c08 Mon Sep 17 00:00:00 2001 From: Dyan Galih Date: Tue, 5 May 2026 22:48:19 +0700 Subject: [PATCH 14/26] Add Architecture Guard to community catalog (#2430) * feat: add Architecture Guard to community catalog - Add architecture-guard v1.4.0 extension entry to catalog - Add entry to README community extensions table - Includes built-in Laravel-specific governance rules * chore: update catalog timestamp to 2026-05-05 * fix: address PR feedback - Add 'governance' category to README legend (used by Architecture Guard) - Update architecture-guard timestamps to 2026-05-05 (submission date) - Align with published extension behavior (Laravel support now built-in) * chore: update Architecture Guard category to process - Changed from 'governance' to 'process' (official category) - Aligns with schema in EXTENSION-PUBLISHING-GUIDE.md - Removed 'governance' from category legend (not an official category) * chore: update timestamps to actual UTC datetime - Top-level updated_at: 2026-05-05T07:26:00Z - Entry created_at/updated_at: 2026-05-05T07:26:00Z - Replaces placeholder 00:00:00Z with actual submission time --- README.md | 1 + extensions/catalog.community.json | 35 ++++++++++++++++++++++++++++++- 2 files changed, 35 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index f730e10b88..c6b8945974 100644 --- a/README.md +++ b/README.md @@ -198,6 +198,7 @@ The following community-contributed extensions are available in [`catalog.commun | Agent Assign | Assign specialized Claude Code agents to spec-kit tasks for targeted execution | `process` | Read+Write 
| [spec-kit-agent-assign](https://github.com/xymelon/spec-kit-agent-assign) | | AI-Driven Engineering (AIDE) | A structured 7-step workflow for building new projects from scratch with AI assistants — from vision through implementation | `process` | Read+Write | [aide](https://github.com/mnriem/spec-kit-extensions/tree/main/aide) | | Architect Impact Previewer | Predicts architectural impact, complexity, and risks of proposed changes before implementation. | `visibility` | Read-only | [spec-kit-architect-preview](https://github.com/UmmeHabiba1312/spec-kit-architect-preview) | +| Architecture Guard | Continuous architecture governance for AI-assisted development. Reviews specs, plans, and code for architecture drift, producing structured refactor tasks and evolution proposals. | `process` | Read+Write | [spec-kit-architecture-guard](https://github.com/DyanGalih/spec-kit-architecture-guard) | | Archive Extension | Archive merged features into main project memory. | `docs` | Read+Write | [spec-kit-archive](https://github.com/stn1slv/spec-kit-archive) | | Azure DevOps Integration | Sync user stories and tasks to Azure DevOps work items using OAuth authentication | `integration` | Read+Write | [spec-kit-azure-devops](https://github.com/pragya247/spec-kit-azure-devops) | | Blueprint | Stay code-literate in AI-driven development: review a complete code blueprint for every task from spec artifacts before /speckit.implement runs | `docs` | Read+Write | [spec-kit-blueprint](https://github.com/chordpli/spec-kit-blueprint) | diff --git a/extensions/catalog.community.json b/extensions/catalog.community.json index 1de30036d3..e8fd66cc50 100644 --- a/extensions/catalog.community.json +++ b/extensions/catalog.community.json @@ -1,6 +1,6 @@ { "schema_version": "1.0", - "updated_at": "2026-05-04T17:02:08Z", + "updated_at": "2026-05-05T07:26:00Z", "catalog_url": "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.community.json", "extensions": { "aide": { @@ 
-100,6 +100,39 @@ "created_at": "2026-04-14T00:00:00Z", "updated_at": "2026-04-14T00:00:00Z" }, + "architecture-guard": { + "name": "Architecture Guard", + "id": "architecture-guard", + "description": "Continuous architecture governance for AI-assisted development. Reviews specs, plans, and code for architecture drift, producing structured refactor tasks and evolution proposals.", + "author": "DyanGalih", + "version": "1.4.0", + "download_url": "https://github.com/DyanGalih/spec-kit-architecture-guard/archive/refs/tags/v1.4.0.zip", + "repository": "https://github.com/DyanGalih/spec-kit-architecture-guard", + "homepage": "https://github.com/DyanGalih/spec-kit-architecture-guard", + "documentation": "https://github.com/DyanGalih/spec-kit-architecture-guard/blob/main/README.md", + "changelog": "https://github.com/DyanGalih/spec-kit-architecture-guard/releases", + "license": "MIT", + "requires": { + "speckit_version": ">=0.1.0" + }, + "provides": { + "commands": 6, + "hooks": 0 + }, + "tags": [ + "architecture", + "governance", + "drift-detection", + "refactor", + "monolithic", + "microservices" + ], + "verified": false, + "downloads": 0, + "stars": 0, + "created_at": "2026-05-05T07:26:00Z", + "updated_at": "2026-05-05T07:26:00Z" + }, "archive": { "name": "Archive Extension", "id": "archive", From 30e6fa9e32e65bc4c7f76a61a0c9feca2f1d3f33 Mon Sep 17 00:00:00 2001 From: Ayesha Aziz <163914368+ayesha-aziz123@users.noreply.github.com> Date: Tue, 5 May 2026 23:28:29 +0500 Subject: [PATCH 15/26] fix: validate URL scheme in build_github_request (#2449) * fix: validate URL scheme in build_github_request * Potential fix for pull request finding Co-authored-by: Copilot Autofix powered by AI <175728472+Copilot@users.noreply.github.com> * test: add missing hostname validation test for build_github_request * fix: update docstring and fix import grouping per Copilot feedback * fix: sort imports and simplify url validation in build_github_request --------- Co-authored-by: Copilot 
Autofix powered by AI <175728472+Copilot@users.noreply.github.com> --- src/specify_cli/_github_http.py | 17 ++++++- tests/test_github_http.py | 79 +++++++++++++++++++++++++++++++++ 2 files changed, 94 insertions(+), 2 deletions(-) create mode 100644 tests/test_github_http.py diff --git a/src/specify_cli/_github_http.py b/src/specify_cli/_github_http.py index ee68a8325c..ffa804dbb7 100644 --- a/src/specify_cli/_github_http.py +++ b/src/specify_cli/_github_http.py @@ -8,8 +8,8 @@ import os import urllib.request -from urllib.parse import urlparse from typing import Dict +from urllib.parse import urlparse # GitHub-owned hostnames that should receive the Authorization header. # Includes codeload.github.com because GitHub archive URL downloads @@ -30,12 +30,25 @@ def build_github_request(url: str) -> urllib.request.Request: ``Authorization: Bearer `` header when the target hostname is one of the known GitHub-owned domains. Non-GitHub URLs are returned as plain requests so credentials are never leaked to third-party hosts. + + Raises: + ValueError: If ``url`` is empty or whitespace-only. + ValueError: If ``url`` does not use the ``http`` or ``https`` scheme. + ValueError: If ``url`` does not include a hostname. 
""" headers: Dict[str, str] = {} + url = url.strip() + if not url: + raise ValueError("url must not be empty") + parsed = urlparse(url) + if parsed.scheme not in {"http", "https"}: + raise ValueError(f"url must start with http:// or https://, got: {url!r}") + if not parsed.hostname: + raise ValueError(f"url must include a hostname, got: {url!r}") github_token = (os.environ.get("GITHUB_TOKEN") or "").strip() gh_token = (os.environ.get("GH_TOKEN") or "").strip() token = github_token or gh_token or None - hostname = (urlparse(url).hostname or "").lower() + hostname = parsed.hostname.lower() if token and hostname in GITHUB_HOSTS: headers["Authorization"] = f"Bearer {token}" return urllib.request.Request(url, headers=headers) diff --git a/tests/test_github_http.py b/tests/test_github_http.py new file mode 100644 index 0000000000..f414aeeb2b --- /dev/null +++ b/tests/test_github_http.py @@ -0,0 +1,79 @@ +"""Tests for GitHub-authenticated HTTP request helpers.""" + +import os +from unittest.mock import patch + +import pytest + +from specify_cli._github_http import ( + build_github_request, +) + + +class TestBuildGitHubRequest: + """Tests for build_github_request() URL validation and auth handling.""" + + # --- URL Validation Tests --- + + def test_empty_url_raises_value_error(self): + """build_github_request() must reject an empty string URL.""" + with pytest.raises(ValueError, match="url must not be empty"): + build_github_request("") + + def test_whitespace_url_raises_value_error(self): + """build_github_request() must reject a whitespace-only URL.""" + with pytest.raises(ValueError, match="url must not be empty"): + build_github_request(" ") + + def test_non_http_url_raises_value_error(self): + """build_github_request() must reject URLs without http/https scheme.""" + with pytest.raises(ValueError, match="url must start with http"): + build_github_request("not-a-url") + + def test_ftp_url_raises_value_error(self): + """build_github_request() must reject ftp:// URLs.""" 
+ with pytest.raises(ValueError, match="url must start with http"): + build_github_request("ftp://github.com/file.zip") + + # --- Valid URL Tests --- + + def test_valid_https_url_returns_request(self): + """build_github_request() must return a Request for a valid https URL.""" + req = build_github_request("https://github.com/github/spec-kit") + assert req.full_url == "https://github.com/github/spec-kit" + + def test_valid_http_url_returns_request(self): + """build_github_request() must accept http:// URLs.""" + req = build_github_request("http://example.com/file") + assert req.full_url == "http://example.com/file" + + # --- Auth Header Tests --- + + def test_github_token_added_for_github_host(self): + """Authorization header is set when GITHUB_TOKEN is present.""" + with patch.dict(os.environ, {"GITHUB_TOKEN": "test-token", "GH_TOKEN": ""}): + req = build_github_request("https://github.com/github/spec-kit") + assert req.get_header("Authorization") == "Bearer test-token" + + def test_gh_token_used_as_fallback(self): + """GH_TOKEN is used when GITHUB_TOKEN is absent.""" + with patch.dict(os.environ, {"GITHUB_TOKEN": "", "GH_TOKEN": "fallback-token"}): + req = build_github_request("https://github.com/github/spec-kit") + assert req.get_header("Authorization") == "Bearer fallback-token" + + def test_no_auth_header_for_non_github_host(self): + """Authorization header must NOT be set for non-GitHub URLs.""" + with patch.dict(os.environ, {"GITHUB_TOKEN": "test-token"}): + req = build_github_request("https://example.com/file") + assert req.get_header("Authorization") is None + + def test_no_auth_header_when_no_token(self): + """No Authorization header when no token is set in environment.""" + with patch.dict(os.environ, {}, clear=True): + req = build_github_request("https://github.com/github/spec-kit") + assert req.get_header("Authorization") is None + + def test_missing_hostname_raises_value_error(self): + """build_github_request() must reject URLs with valid scheme but no 
hostname.""" + with pytest.raises(ValueError, match="url must include a hostname"): + build_github_request("http://") \ No newline at end of file From 0f2655181400defdac6904a9461f58a7416d4d72 Mon Sep 17 00:00:00 2001 From: Manfred Riem <15701806+mnriem@users.noreply.github.com> Date: Tue, 5 May 2026 16:59:25 -0500 Subject: [PATCH 16/26] feat: improve catalog submission templates and CODEOWNERS (#2401) Simplify the community catalog submission flow to use issue templates with manual maintainer review (no automation scripts; the only workflow added is a lightweight auto-assign/notification workflow). - Add explicit CODEOWNERS entries for catalog.community.json files so submissions are automatically assigned to a maintainer for review - Improve preset submission template: - Add 'Required Extensions' optional field - Make 'Templates Provided' optional (supports command-only presets) - Add 'Number of Scripts' optional field The existing extension and preset issue templates already collect all required catalog metadata. Maintainers review submissions and manually update the catalog JSON files.
Closes #2400 --- .github/CODEOWNERS | 5 + .github/ISSUE_TEMPLATE/preset_submission.yml | 22 +- .github/workflows/catalog-assign.yml | 59 ++++ README.md | 2 +- docs/community/presets.md | 2 +- extensions/EXTENSION-DEVELOPMENT-GUIDE.md | 8 +- extensions/EXTENSION-PUBLISHING-GUIDE.md | 266 +++---------------- extensions/README.md | 12 +- presets/README.md | 2 +- 9 files changed, 129 insertions(+), 249 deletions(-) create mode 100644 .github/workflows/catalog-assign.yml diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index a60b7a0306..cf0686db1a 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,3 +1,8 @@ # Global code owner * @mnriem +# Community catalog files — explicit ownership for when global ownership expands +/extensions/catalog.community.json @mnriem +/integrations/catalog.community.json @mnriem +/presets/catalog.community.json @mnriem + diff --git a/.github/ISSUE_TEMPLATE/preset_submission.yml b/.github/ISSUE_TEMPLATE/preset_submission.yml index 3a1b963492..f80e9cbdc5 100644 --- a/.github/ISSUE_TEMPLATE/preset_submission.yml +++ b/.github/ISSUE_TEMPLATE/preset_submission.yml @@ -95,11 +95,18 @@ body: validations: required: true + - type: input + id: required-extensions + attributes: + label: Required Extensions (optional) + description: Comma-separated list of required extension IDs (e.g., aide) + placeholder: "e.g., aide, canon" + - type: textarea id: templates-provided attributes: label: Templates Provided - description: List the template overrides your preset provides + description: List the template overrides your preset provides (enter "None" if command-only) placeholder: | - spec-template.md — adds compliance section - plan-template.md — includes audit checkpoints @@ -110,10 +117,19 @@ body: - type: textarea id: commands-provided attributes: - label: Commands Provided (optional) - description: List any command overrides your preset provides + label: Commands Provided + description: List the command overrides your preset provides (enter 
"None" if template-only) placeholder: | - speckit.specify.md — customized for compliance workflows + validations: + required: true + + - type: input + id: scripts-count + attributes: + label: Number of Scripts (optional) + description: How many scripts does your preset provide? (leave empty if none) + placeholder: "e.g., 1" - type: textarea id: tags diff --git a/.github/workflows/catalog-assign.yml b/.github/workflows/catalog-assign.yml new file mode 100644 index 0000000000..4191bcc554 --- /dev/null +++ b/.github/workflows/catalog-assign.yml @@ -0,0 +1,59 @@ +name: "Catalog: Auto-assign submission" + +on: + issues: + types: [opened, labeled] + +jobs: + assign: + if: > + (github.event.action == 'opened' && ( + contains(github.event.issue.labels.*.name, 'extension-submission') || + contains(github.event.issue.labels.*.name, 'preset-submission') + )) || + (github.event.action == 'labeled' && ( + github.event.label.name == 'extension-submission' || + github.event.label.name == 'preset-submission' + )) + runs-on: ubuntu-latest + permissions: + issues: write + steps: + - uses: actions/github-script@v7 + with: + script: | + const issue = context.payload.issue; + const assigned = (issue.assignees || []).map(a => a.login); + const marker = ''; + + // Assign mnriem if not already assigned + if (!assigned.includes('mnriem')) { + try { + await github.rest.issues.addAssignees({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.issue.number, + assignees: ['mnriem'], + }); + } catch (e) { + console.log(`Warning: could not assign mnriem: ${e.message}`); + } + } + + // Post team notification if not already posted + const comments = await github.paginate( + github.rest.issues.listComments, + { + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.issue.number, + } + ); + if (!comments.some(c => c.body && c.body.includes(marker))) { + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: 
context.repo.repo, + issue_number: context.issue.number, + body: marker + '\ncc @github/spec-kit-maintainers — new catalog submission for review.', + }); + } diff --git a/README.md b/README.md index c6b8945974..2ccc5d3fc0 100644 --- a/README.md +++ b/README.md @@ -174,7 +174,7 @@ Want to see Spec Kit in action? Watch our [video overview](https://www.youtube.c ## 🧩 Community Extensions > [!NOTE] -> Community extensions are independently created and maintained by their respective authors. GitHub and the Spec Kit maintainers may review pull requests that add entries to the community catalog for formatting, catalog structure, or policy compliance, but they do **not review, audit, endorse, or support the extension code itself**. The Community Extensions website is also a third-party resource. Review extension source code before installation and use at your own discretion. +> Community extensions are independently created and maintained by their respective authors. Maintainers only verify that catalog entries are complete and correctly formatted — they do **not review, audit, endorse, or support the extension code itself**. The Community Extensions website is also a third-party resource. Review extension source code before installation and use at your own discretion. 🔍 **Browse and search community extensions on the [Community Extensions website](https://speckit-community.github.io/extensions/).** diff --git a/docs/community/presets.md b/docs/community/presets.md index e622875ef2..6eb8019872 100644 --- a/docs/community/presets.md +++ b/docs/community/presets.md @@ -1,7 +1,7 @@ # Community Presets > [!NOTE] -> Community presets are independently created and maintained by their respective authors. GitHub and the Spec Kit maintainers may review pull requests that add entries to the community catalog for formatting, catalog structure, or policy compliance, but they do **not review, audit, endorse, or support the preset code itself**. 
Review preset source code before installation and use at your own discretion. +> Community presets are independently created and maintained by their respective authors. Maintainers only verify that catalog entries are complete and correctly formatted — they do **not review, audit, endorse, or support the preset code itself**. Review preset source code before installation and use at your own discretion. The following community-contributed presets customize how Spec Kit behaves — overriding templates, commands, and terminology without changing any tooling. Presets are available in [`catalog.community.json`](https://github.com/github/spec-kit/blob/main/presets/catalog.community.json): diff --git a/extensions/EXTENSION-DEVELOPMENT-GUIDE.md b/extensions/EXTENSION-DEVELOPMENT-GUIDE.md index 42ce2d71df..5f24e71f0c 100644 --- a/extensions/EXTENSION-DEVELOPMENT-GUIDE.md +++ b/extensions/EXTENSION-DEVELOPMENT-GUIDE.md @@ -528,11 +528,9 @@ specify extension add --from https://github.com/.../spec-kit-my Submit to the community catalog for public discovery: -1. **Fork** spec-kit repository -2. **Add entry** to `extensions/catalog.community.json` -3. **Update** the Community Extensions table in `README.md` with your extension -4. **Create PR** following the [Extension Publishing Guide](EXTENSION-PUBLISHING-GUIDE.md) -5. **After merge**, your extension becomes available: +1. **Create a GitHub release** for your extension +2. **File an issue** using the [Extension Submission](https://github.com/github/spec-kit/issues/new?template=extension_submission.yml) template +3. 
**After review**, a maintainer updates the catalog and your extension becomes available: - Users can browse `catalog.community.json` to discover your extension - Users copy the entry to their own `catalog.json` - Users install with: `specify extension add my-ext` (from their catalog) diff --git a/extensions/EXTENSION-PUBLISHING-GUIDE.md b/extensions/EXTENSION-PUBLISHING-GUIDE.md index 1433738743..be5b375241 100644 --- a/extensions/EXTENSION-PUBLISHING-GUIDE.md +++ b/extensions/EXTENSION-PUBLISHING-GUIDE.md @@ -7,9 +7,8 @@ This guide explains how to publish your extension to the Spec Kit extension cata 1. [Prerequisites](#prerequisites) 2. [Prepare Your Extension](#prepare-your-extension) 3. [Submit to Catalog](#submit-to-catalog) -4. [Verification Process](#verification-process) -5. [Release Workflow](#release-workflow) -6. [Best Practices](#best-practices) +4. [Release Workflow](#release-workflow) +5. [Best Practices](#best-practices) --- @@ -133,222 +132,46 @@ specify extension add --from https://github.com/your-org/spec-k Spec Kit uses a dual-catalog system. For details about how catalogs work, see the main [Extensions README](README.md#extension-catalogs). -**For extension publishing**: All community extensions should be added to `catalog.community.json`. Users browse this catalog and copy extensions they trust into their own `catalog.json`. +**For extension publishing**: All community extensions are listed in `extensions/catalog.community.json`. Users browse this catalog and copy extensions they trust into their own `catalog.json`. -### 1. Fork the spec-kit Repository +### How to Submit -```bash -# Fork on GitHub -# https://github.com/github/spec-kit/fork - -# Clone your fork -git clone https://github.com/YOUR-USERNAME/spec-kit.git -cd spec-kit -``` - -### 2. 
Add Extension to Community Catalog - -Edit `extensions/catalog.community.json` and add your extension: - -```json -{ - "schema_version": "1.0", - "updated_at": "2026-01-28T15:54:00Z", - "catalog_url": "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.community.json", - "extensions": { - "your-extension": { - "name": "Your Extension Name", - "id": "your-extension", - "description": "Brief description of your extension", - "author": "Your Name", - "version": "1.0.0", - "download_url": "https://github.com/your-org/spec-kit-your-extension/archive/refs/tags/v1.0.0.zip", - "repository": "https://github.com/your-org/spec-kit-your-extension", - "homepage": "https://github.com/your-org/spec-kit-your-extension", - "documentation": "https://github.com/your-org/spec-kit-your-extension/blob/main/docs/", - "changelog": "https://github.com/your-org/spec-kit-your-extension/blob/main/CHANGELOG.md", - "license": "MIT", - "requires": { - "speckit_version": ">=0.1.0", - "tools": [ - { - "name": "required-mcp-tool", - "version": ">=1.0.0", - "required": true - } - ] - }, - "provides": { - "commands": 3, - "hooks": 1 - }, - "tags": [ - "category", - "tool-name", - "feature" - ], - "verified": false, - "downloads": 0, - "stars": 0, - "created_at": "2026-01-28T00:00:00Z", - "updated_at": "2026-01-28T00:00:00Z" - } - } -} -``` - -**Important**: - -- Set `verified: false` (maintainers will verify) -- Set `downloads: 0` and `stars: 0` (auto-updated later) -- Use current timestamp for `created_at` and `updated_at` -- Update the top-level `updated_at` to current time +To submit your extension to the community catalog, file a new issue using the **[Extension Submission](https://github.com/github/spec-kit/issues/new?template=extension_submission.yml)** template. The template collects all required metadata, including: -### 3. 
Update Community Extensions Table +- Extension ID, name, and version +- Description, author, and license +- Repository, download URL, and documentation links +- Required Spec Kit version and any tool dependencies +- Number of commands and hooks +- Tags and key features +- Testing confirmation -Add your extension to the Community Extensions table in the project root `README.md`: +> [!IMPORTANT] +> Do **not** open a pull request directly to edit `extensions/catalog.community.json`. All community extension submissions must go through the issue template so a maintainer can review the entry and update the catalog. -```markdown -| Your Extension Name | Brief description of what it does | `` | | [repo-name](https://github.com/your-org/spec-kit-your-extension) | -``` - -**(Table) Category** — pick the one that best fits your extension: - -- `docs` — reads, validates, or generates spec artifacts -- `code` — reviews, validates, or modifies source code -- `process` — orchestrates workflow across phases -- `integration` — syncs with external platforms -- `visibility` — reports on project health or progress - -**Effect** — choose one: - -- Read-only — produces reports without modifying files -- Read+Write — modifies files, creates artifacts, or updates specs - -Insert your extension in alphabetical order in the table. +### What Happens After You Submit -### 4. Submit Pull Request +1. Your issue is automatically labeled and assigned to a maintainer for review +2. A maintainer verifies that the catalog entry is complete and correctly formatted +3. Once approved, the maintainer adds your extension to `extensions/catalog.community.json` and the Community Extensions table in the README +4. 
Your extension becomes discoverable via `specify extension search` -```bash -# Create a branch -git checkout -b add-your-extension - -# Commit your changes -git add extensions/catalog.community.json README.md -git commit -m "Add your-extension to community catalog - -- Extension ID: your-extension -- Version: 1.0.0 -- Author: Your Name -- Description: Brief description -" +### What Maintainers Check -# Push to your fork -git push origin add-your-extension +- The catalog entry fields are complete and correctly formatted +- The download URL is accessible +- The repository exists and contains an `extension.yml` manifest -# Create Pull Request on GitHub -# https://github.com/github/spec-kit/compare -``` - -**Pull Request Template**: - -```markdown -## Extension Submission - -**Extension Name**: Your Extension Name -**Extension ID**: your-extension -**Version**: 1.0.0 -**Author**: Your Name -**Repository**: https://github.com/your-org/spec-kit-your-extension - -### Description -Brief description of what your extension does. - -### Checklist -- [x] Valid extension.yml manifest -- [x] README.md with installation and usage docs -- [x] LICENSE file included -- [x] GitHub release created (v1.0.0) -- [x] Extension tested on real project -- [x] All commands working -- [x] No security vulnerabilities -- [x] Added to extensions/catalog.community.json -- [x] Added to Community Extensions table in README.md - -### Testing -Tested on: -- macOS 13.0+ with spec-kit 0.1.0 -- Project: [Your test project] - -### Additional Notes -Any additional context or notes for reviewers. -``` +> [!NOTE] +> Maintainers do **not** review, audit, or test the extension code itself. ---- - -## Verification Process - -### What Happens After Submission - -1. **Automated Checks** (if available): - - Manifest validation - - Download URL accessibility - - Repository existence - - License file presence - -2. 
**Manual Review**: - - Code quality review - - Security audit - - Functionality testing - - Documentation review - -3. **Verification**: - - If approved, `verified: true` is set - - Extension appears in `specify extension search --verified` - -### Verification Criteria - -To be verified, your extension must: - -✅ **Functionality**: - -- Works as described in documentation -- All commands execute without errors -- No breaking changes to user workflows - -✅ **Security**: - -- No known vulnerabilities -- No malicious code -- Safe handling of user data -- Proper validation of inputs - -✅ **Code Quality**: - -- Clean, readable code -- Follows extension best practices -- Proper error handling -- Helpful error messages - -✅ **Documentation**: - -- Clear installation instructions -- Usage examples -- Troubleshooting section -- Accurate description +### Typical Review Timeline -✅ **Maintenance**: +- **Review**: 3-7 business days -- Active repository -- Responsive to issues -- Regular updates -- Semantic versioning followed +### Updating an Existing Extension -### Typical Review Timeline - -- **Automated checks**: Immediate (if implemented) -- **Manual review**: 3-7 business days -- **Verification**: After successful review +To update an extension that is already in the catalog (e.g., for a new version), file a new **[Extension Submission](https://github.com/github/spec-kit/issues/new?template=extension_submission.yml)** issue with the updated version, download URL, and any other changed fields. Mention in the issue that this is an update to an existing entry. --- @@ -385,26 +208,7 @@ When releasing a new version: # Create release on GitHub ``` -4. 
**Update catalog**: - - ```bash - # Fork spec-kit repo (or update existing fork) - cd spec-kit - - # Update extensions/catalog.json - jq '.extensions["your-extension"].version = "1.1.0"' extensions/catalog.json > tmp.json && mv tmp.json extensions/catalog.json - jq '.extensions["your-extension"].download_url = "https://github.com/your-org/spec-kit-your-extension/archive/refs/tags/v1.1.0.zip"' extensions/catalog.json > tmp.json && mv tmp.json extensions/catalog.json - jq '.extensions["your-extension"].updated_at = "2026-02-15T00:00:00Z"' extensions/catalog.json > tmp.json && mv tmp.json extensions/catalog.json - jq '.updated_at = "2026-02-15T00:00:00Z"' extensions/catalog.json > tmp.json && mv tmp.json extensions/catalog.json - - # Submit PR - git checkout -b update-your-extension-v1.1.0 - git add extensions/catalog.json - git commit -m "Update your-extension to v1.1.0" - git push origin update-your-extension-v1.1.0 - ``` - -5. **Submit update PR** with changelog in description +4. **File an update submission** using the [Extension Submission](https://github.com/github/spec-kit/issues/new?template=extension_submission.yml) template with the new version and download URL. Mention in the issue that this is an update to an existing entry. --- @@ -473,9 +277,9 @@ A: The main catalog is for public extensions only. For private extensions: - Users add your catalog: `specify extension add-catalog https://your-domain.com/catalog.json` - Not yet implemented - coming in Phase 4 -### Q: How long does verification take? +### Q: How long does review take? -A: Typically 3-7 business days for initial review. Updates to verified extensions are usually faster. +A: Typically 3-7 business days. Updates to existing extensions are usually faster. ### Q: What if my extension is rejected? @@ -483,11 +287,11 @@ A: You'll receive feedback on what needs to be fixed. Make the changes and resub ### Q: Can I update my extension anytime? 
-A: Yes, submit a PR to update the catalog with your new version. Verified status may be re-evaluated for major changes. +A: Yes, file a new [Extension Submission](https://github.com/github/spec-kit/issues/new?template=extension_submission.yml) issue with the updated version and download URL. Mention that it is an update to an existing entry. ### Q: Do I need to be verified to be in the catalog? -A: No, unverified extensions are still searchable. Verification just adds trust and visibility. +A: No. All community extensions are listed in the catalog once their submission is reviewed and accepted. ### Q: Can extensions have paid features? @@ -536,7 +340,7 @@ A: Extensions should be free and open-source. Commercial support/services are al "hooks": "integer (optional)" }, "tags": ["array of strings (2-10 tags)"], - "verified": "boolean (default: false)", + "verified": "boolean (default: false, set by maintainers)", "downloads": "integer (auto-updated)", "stars": "integer (auto-updated)", "created_at": "string (ISO 8601 datetime)", diff --git a/extensions/README.md b/extensions/README.md index f535ba539a..4dc9e64f5c 100644 --- a/extensions/README.md +++ b/extensions/README.md @@ -25,13 +25,13 @@ specify extension search # Now uses your organization's catalog instead of the ### Community Reference Catalog (`catalog.community.json`) > [!NOTE] -> Community extensions are independently created and maintained by their respective authors. GitHub and the Spec Kit maintainers may review pull requests that add entries to the community catalog for formatting, catalog structure, or policy compliance, but they do **not review, audit, endorse, or support the extension code itself**. Review extension source code before installation and use at your own discretion. +> Community extensions are independently created and maintained by their respective authors. 
Maintainers only verify that catalog entries are complete and correctly formatted — they do **not review, audit, endorse, or support the extension code itself**. Review extension source code before installation and use at your own discretion. - **Purpose**: Browse available community-contributed extensions - **Status**: Active - contains extensions submitted by the community - **Location**: `extensions/catalog.community.json` - **Usage**: Reference catalog for discovering available extensions -- **Submission**: Open to community contributions via Pull Request +- **Submission**: Open to community contributions via [issue template](https://github.com/github/spec-kit/issues/new?template=extension_submission.yml) **How It Works:** @@ -72,7 +72,7 @@ specify extension add --from https://github.com/org/spec-kit-ex ## Available Community Extensions > [!NOTE] -> Community extensions are independently created and maintained by their respective authors. GitHub and the Spec Kit maintainers may review pull requests that add entries to the community catalog for formatting, catalog structure, or policy compliance, but they do **not review, audit, endorse, or support the extension code itself**. The Community Extensions website is also a third-party resource. Review extension source code before installation and use at your own discretion. +> Community extensions are independently created and maintained by their respective authors. Maintainers only verify that catalog entries are complete and correctly formatted — they do **not review, audit, endorse, or support the extension code itself**. The Community Extensions website is also a third-party resource. Review extension source code before installation and use at your own discretion. 🔍 **Browse and search community extensions on the [Community Extensions website](https://speckit-community.github.io/extensions/).** @@ -89,10 +89,8 @@ To add your extension to the community catalog: 1. 
**Prepare your extension** following the [Extension Development Guide](EXTENSION-DEVELOPMENT-GUIDE.md) 2. **Create a GitHub release** for your extension -3. **Submit a Pull Request** that: - - Adds your extension to `extensions/catalog.community.json` - - Updates this README with your extension in the Available Extensions table -4. **Wait for review** - maintainers will review and merge if criteria are met +3. **File an issue** using the [Extension Submission](https://github.com/github/spec-kit/issues/new?template=extension_submission.yml) template with all required metadata +4. **Wait for review** — a maintainer will review the submission, update the catalog, and close the issue See the [Extension Publishing Guide](EXTENSION-PUBLISHING-GUIDE.md) for detailed step-by-step instructions. diff --git a/presets/README.md b/presets/README.md index abaeb27067..29cce64248 100644 --- a/presets/README.md +++ b/presets/README.md @@ -98,7 +98,7 @@ Multiple composing presets chain recursively. For example, a security preset wit Presets are discovered through catalogs. By default, Spec Kit uses the official and community catalogs: > [!NOTE] -> Community presets are independently created and maintained by their respective authors. GitHub and the Spec Kit maintainers may review pull requests that add entries to the community catalog for formatting, catalog structure, or policy compliance, but they do **not review, audit, endorse, or support the preset code itself**. Review preset source code before installation and use at your own discretion. +> Community presets are independently created and maintained by their respective authors. Maintainers only verify that catalog entries are complete and correctly formatted — they do **not review, audit, endorse, or support the preset code itself**. Review preset source code before installation and use at your own discretion. 
```bash # List active catalogs From b4060d562040341e1cf35d9ef0d266c52728d72f Mon Sep 17 00:00:00 2001 From: Copilot <198982749+Copilot@users.noreply.github.com> Date: Wed, 6 May 2026 06:56:59 -0500 Subject: [PATCH 17/26] Load constitution context in `/speckit.implement` to enforce governance during implementation (#2460) * Initial plan * fix implement command to load constitution context Agent-Logs-Url: https://github.com/github/spec-kit/sessions/05663d9d-149b-4c13-a22d-2552b3fa619c Co-authored-by: mnriem <15701806+mnriem@users.noreply.github.com> * Potential fix for pull request finding Co-authored-by: Copilot Autofix powered by AI <175728472+Copilot@users.noreply.github.com> --------- Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com> Co-authored-by: mnriem <15701806+mnriem@users.noreply.github.com> Co-authored-by: Copilot Autofix powered by AI <175728472+Copilot@users.noreply.github.com> --- templates/commands/implement.md | 1 + tests/integrations/test_integration_generic.py | 10 ++++++++++ 2 files changed, 11 insertions(+) diff --git a/templates/commands/implement.md b/templates/commands/implement.md index 7ba5ba8e0c..52a042161f 100644 --- a/templates/commands/implement.md +++ b/templates/commands/implement.md @@ -88,6 +88,7 @@ You **MUST** consider the user input before proceeding (if not empty). - **IF EXISTS**: Read data-model.md for entities and relationships - **IF EXISTS**: Read contracts/ for API specifications and test requirements - **IF EXISTS**: Read research.md for technical decisions and constraints + - **IF EXISTS**: Read /memory/constitution.md for governance constraints - **IF EXISTS**: Read quickstart.md for integration scenarios 4. 
**Project Setup Verification**: diff --git a/tests/integrations/test_integration_generic.py b/tests/integrations/test_integration_generic.py index 290a36419e..4f515a01d2 100644 --- a/tests/integrations/test_integration_generic.py +++ b/tests/integrations/test_integration_generic.py @@ -185,6 +185,16 @@ def test_plan_references_correct_context_file(self, tmp_path): ) assert "__CONTEXT_FILE__" not in content + def test_implement_loads_constitution_context(self, tmp_path): + """The generated implement command should load constitution governance context.""" + i = get_integration("generic") + m = IntegrationManifest("generic", tmp_path) + i.setup(tmp_path, m, parsed_options={"commands_dir": ".custom/cmds"}) + implement_file = tmp_path / ".custom" / "cmds" / "speckit.implement.md" + assert implement_file.exists() + content = implement_file.read_text(encoding="utf-8") + assert ".specify/memory/constitution.md" in content + # -- CLI -------------------------------------------------------------- def test_cli_generic_without_commands_dir_fails(self, tmp_path): From 77e605da6bff2f9809461f17d01f34d65f87d2e7 Mon Sep 17 00:00:00 2001 From: Manfred Riem <15701806+mnriem@users.noreply.github.com> Date: Wed, 6 May 2026 07:02:55 -0500 Subject: [PATCH 18/26] chore: release 0.8.6, begin 0.8.7.dev0 development (#2463) * chore: bump version to 0.8.6 * chore: begin 0.8.7.dev0 development --------- Co-authored-by: github-actions[bot] <41898282+github-actions[bot]@users.noreply.github.com> --- CHANGELOG.md | 15 +++++++++++++++ pyproject.toml | 2 +- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b15c06dbe2..602b1129d7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -2,6 +2,21 @@ +## [0.8.6] - 2026-05-06 + +### Changed + +- Load constitution context in `/speckit.implement` to enforce governance during implementation (#2460) +- feat: improve catalog submission templates and CODEOWNERS (#2401) +- fix: validate URL scheme in build_github_request 
(#2449) +- Add Architecture Guard to community catalog (#2430) +- Add multi-model-review extension to community catalog (#2446) +- Update Ralph Loop to v1.0.2 (#2435) +- Pin GitHub Actions by SHA (#2441) +- fix(workflows): require project for catalog list (#2436) +- Add agent-parity-governance to community catalog (#2382) +- chore: release 0.8.5, begin 0.8.6.dev0 development (#2447) + ## [0.8.5] - 2026-05-04 ### Changed diff --git a/pyproject.toml b/pyproject.toml index dd2e597e95..cfeaf74fc2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "specify-cli" -version = "0.8.6.dev0" +version = "0.8.7.dev0" description = "Specify CLI, part of GitHub Spec Kit. A tool to bootstrap your projects for Spec-Driven Development (SDD)." requires-python = ">=3.11" dependencies = [ From c0bf5d0c648574e49e52f17d6042c178b888a200 Mon Sep 17 00:00:00 2001 From: Quratulain-bilal Date: Wed, 6 May 2026 22:07:02 +0500 Subject: [PATCH 19/26] feat(catalog): add Cost Tracker (cost) community extension (#2448) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat(catalog): add Cost Tracker (cost) community extension Adds a new entry for spec-kit-cost — track real LLM dollar cost across SDD workflows with per-feature budgets, per-integration comparison, and finance-ready exports. 
Repo: https://github.com/Quratulain-bilal/spec-kit-cost Release: v1.0.0 * docs(catalog): add Cost Tracker README row, bump updated_at Address Copilot review feedback: - Add Cost Tracker row to README community extensions table - Bump top-level updated_at per EXTENSION-PUBLISHING-GUIDE.md * fix(catalog): address Copilot feedback on cost extension entry - Move cost entry after confluence so the c* block is alphabetized - Bump top-level updated_at to 2026-05-05 per EXTENSION-PUBLISHING-GUIDE - Use documented 'visibility' category in README (not 'analytics'), matching Token Consumption Analyzer's classification - Replace 'analytics' tag with 'visibility' in catalog tags for consistency * fix(catalog): bump top-level updated_at for cost entry addition Address Copilot feedback: the file-level updated_at must be bumped on every catalog change per EXTENSION-PUBLISHING-GUIDE.md:204-205. --------- Co-authored-by: Quratulain-bilal --- README.md | 1 + extensions/catalog.community.json | 36 +++++++++++++++++++++++++++++-- 2 files changed, 35 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 2ccc5d3fc0..a9516af5c3 100644 --- a/README.md +++ b/README.md @@ -212,6 +212,7 @@ The following community-contributed extensions are available in [`catalog.commun | Cleanup Extension | Post-implementation quality gate that reviews changes, fixes small issues (scout rule), creates tasks for medium issues, and generates analysis for large issues | `code` | Read+Write | [spec-kit-cleanup](https://github.com/dsrednicki/spec-kit-cleanup) | | Conduct Extension | Orchestrates spec-kit phases via sub-agent delegation to reduce context pollution. 
| `process` | Read+Write | [spec-kit-conduct-ext](https://github.com/twbrandon7/spec-kit-conduct-ext) | | Confluence Extension | Create a doc in Confluence summarizing the specifications and planning files | `integration` | Read+Write | [spec-kit-confluence](https://github.com/aaronrsun/spec-kit-confluence) | +| Cost Tracker | Track real LLM dollar cost across SDD workflows — per-feature budgets, per-integration comparison, and finance-ready exports | `visibility` | Read+Write | [spec-kit-cost](https://github.com/Quratulain-bilal/spec-kit-cost) | | DocGuard — CDD Enforcement | Canonical-Driven Development enforcement. Validates, scores, and traces project documentation with automated checks, AI-driven workflows, and spec-kit hooks. Zero NPM runtime dependencies. | `docs` | Read+Write | [spec-kit-docguard](https://github.com/raccioly/docguard) | | Extensify | Create and validate extensions and extension catalogs | `process` | Read+Write | [extensify](https://github.com/mnriem/spec-kit-extensions/tree/main/extensify) | | Fix Findings | Automated analyze-fix-reanalyze loop that resolves spec findings until clean | `code` | Read+Write | [spec-kit-fix-findings](https://github.com/Quratulain-bilal/spec-kit-fix-findings) | diff --git a/extensions/catalog.community.json b/extensions/catalog.community.json index e8fd66cc50..81d4e1f18e 100644 --- a/extensions/catalog.community.json +++ b/extensions/catalog.community.json @@ -1,6 +1,6 @@ { "schema_version": "1.0", - "updated_at": "2026-05-05T07:26:00Z", + "updated_at": "2026-05-06T00:00:00Z", "catalog_url": "https://raw.githubusercontent.com/github/spec-kit/main/extensions/catalog.community.json", "extensions": { "aide": { @@ -580,6 +580,38 @@ "created_at": "2026-03-29T00:00:00Z", "updated_at": "2026-03-29T00:00:00Z" }, + "cost": { + "name": "Cost Tracker", + "id": "cost", + "description": "Track real LLM dollar cost across SDD workflows — per-feature budgets, per-integration comparison, and finance-ready exports.", + 
"author": "Quratulain-bilal", + "version": "1.0.0", + "download_url": "https://github.com/Quratulain-bilal/spec-kit-cost/archive/refs/tags/v1.0.0.zip", + "repository": "https://github.com/Quratulain-bilal/spec-kit-cost", + "homepage": "https://github.com/Quratulain-bilal/spec-kit-cost", + "documentation": "https://github.com/Quratulain-bilal/spec-kit-cost/blob/main/README.md", + "changelog": "https://github.com/Quratulain-bilal/spec-kit-cost/blob/main/CHANGELOG.md", + "license": "MIT", + "requires": { + "speckit_version": ">=0.8.0" + }, + "provides": { + "commands": 5, + "hooks": 0 + }, + "tags": [ + "cost", + "budget", + "tokens", + "visibility", + "finance" + ], + "verified": false, + "downloads": 0, + "stars": 0, + "created_at": "2026-05-03T00:00:00Z", + "updated_at": "2026-05-05T00:00:00Z" + }, "diagram": { "name": "Spec Diagram", "id": "diagram", @@ -2905,7 +2937,7 @@ "downloads": 0, "stars": 0, "created_at": "2026-04-13T00:00:00Z", - "updated_at": "2026-04-13T00:00:00Z" + "updated_at": "2026-04-13T00:00:00Z" } } } From 793632089ad924591c32a5b01a5192d8cc254b2b Mon Sep 17 00:00:00 2001 From: Eric Rodriguez Suazo <97453318+ericnoam@users.noreply.github.com> Date: Wed, 6 May 2026 19:19:10 +0200 Subject: [PATCH 20/26] fix(forge): use hyphen notation for command refs in Forge integration (#2462) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix(forge): use hyphen notation for command refs in Forge integration - Add invoke_separator = "-" class attribute to ForgeIntegration so effective_invoke_separator() returns "-" for shared-template installs - Add "invoke_separator": "-" to ForgeIntegration.registrar_config so agents.py CommandRegistrar can resolve refs with the correct separator - Pass invoke_separator to process_template() in ForgeIntegration.setup() so all .forge/commands/*.md bodies use /speckit-foo notation - Replace literal /speckit.specify with __SPECKIT_COMMAND_SPECIFY__ in 
extensions/git/commands/speckit.git.feature.md so every agent resolves the reference through its own separator - Apply resolve_command_refs re.sub in agents.py register_commands() after argument-placeholder substitution so extension commands registered for Forge get /speckit-foo refs; all other agents continue to get /speckit.foo Fixes ZSH compatibility: dot-notation command invocations (/speckit.specify) are misinterpreted by ZSH as file-path operations; hyphen notation (/speckit-specify) works correctly in all shells. * fix(agents): propagate invoke_separator from integration class into AGENT_CONFIGS Skills-based agents (claude, codex, kimi, …) inherit invoke_separator="-" from SkillsIntegration but do not repeat it in their registrar_config dicts. _build_agent_configs() was copying registrar_config verbatim, so register_commands() fell back to "." when resolving __SPECKIT_COMMAND_*__ tokens for those agents — emitting /speckit.specify instead of the correct /speckit-specify for extension commands like speckit.git.feature. Fix: after copying registrar_config, inject invoke_separator from the integration's class attribute when it is not already declared explicitly. This makes the integration class the single source of truth for all agents, without requiring each SkillsIntegration subclass to duplicate the field. Also replace the inline re.sub in register_commands() with a call to IntegrationBase.resolve_command_refs() (deferred import to avoid the existing circular dependency) so token-resolution logic is not duplicated. Adds two tests in test_agent_config_consistency.py: - test_skills_agents_have_hyphen_invoke_separator_in_agent_configs: asserts every /SKILL.md agent has invoke_separator="-" in AGENT_CONFIGS. - test_skills_agent_command_token_resolves_with_hyphen: end-to-end check via CommandRegistrar that the git extension's speckit.git.feature command is installed for Claude with /speckit-specify (not /speckit.specify). Addresses review comment on PR #2462. 
--- .../git/commands/speckit.git.feature.md | 2 +- src/specify_cli/agents.py | 63 ++++++++++---- .../integrations/forge/__init__.py | 3 + tests/integrations/test_integration_forge.py | 78 +++++++++++++++++ tests/test_agent_config_consistency.py | 86 ++++++++++++++++++- 5 files changed, 215 insertions(+), 17 deletions(-) diff --git a/extensions/git/commands/speckit.git.feature.md b/extensions/git/commands/speckit.git.feature.md index 1a9c5e35da..5bed9e5e57 100644 --- a/extensions/git/commands/speckit.git.feature.md +++ b/extensions/git/commands/speckit.git.feature.md @@ -4,7 +4,7 @@ description: "Create a feature branch with sequential or timestamp numbering" # Create Feature Branch -Create and switch to a new git feature branch for the given specification. This command handles **branch creation only** — the spec directory and files are created by the core `/speckit.specify` workflow. +Create and switch to a new git feature branch for the given specification. This command handles **branch creation only** — the spec directory and files are created by the core `__SPECKIT_COMMAND_SPECIFY__` workflow. ## User Input diff --git a/src/specify_cli/agents.py b/src/specify_cli/agents.py index 726b0fd2a6..4d78d5ac41 100644 --- a/src/specify_cli/agents.py +++ b/src/specify_cli/agents.py @@ -7,12 +7,12 @@ """ import os -from pathlib import Path -from typing import Dict, List, Any, Optional - import platform import re from copy import deepcopy +from pathlib import Path +from typing import Any, Dict, List, Optional + import yaml @@ -25,7 +25,16 @@ def _build_agent_configs() -> dict[str, Any]: if key == "generic": continue if integration.registrar_config: - configs[key] = dict(integration.registrar_config) + config = dict(integration.registrar_config) + # Propagate invoke_separator from the integration class when the + # registrar_config dict doesn't already declare it explicitly. 
+ # SkillsIntegration subclasses (claude, codex, …) set + # invoke_separator="-" as a class attribute but omit it from + # registrar_config, so without this they would fall back to "." + # when register_commands() resolves __SPECKIT_COMMAND_*__ tokens. + if "invoke_separator" not in config: + config["invoke_separator"] = integration.invoke_separator + configs[key] = config return configs @@ -419,9 +428,7 @@ def _ensure_inside(candidate: Path, base: Path) -> None: normalized = Path(os.path.normpath(candidate)) base_normalized = Path(os.path.normpath(base)) if not normalized.is_relative_to(base_normalized): - raise ValueError( - f"Output path {candidate!r} escapes directory {base!r}" - ) + raise ValueError(f"Output path {candidate!r} escapes directory {base!r}") def register_commands( self, @@ -471,7 +478,10 @@ def register_commands( if frontmatter.get("strategy") == "wrap": from .presets import _substitute_core_template - body, core_frontmatter = _substitute_core_template(body, cmd_name, project_root, self) + + body, core_frontmatter = _substitute_core_template( + body, cmd_name, project_root, self + ) frontmatter = dict(frontmatter) for key in ("scripts", "agent_scripts"): if key not in frontmatter and key in core_frontmatter: @@ -492,6 +502,16 @@ def register_commands( body, "$ARGUMENTS", agent_config["args"] ) + # Resolve __SPECKIT_COMMAND_*__ tokens using the agent's invoke separator. + # The separator is sourced from agent_config (populated by _build_agent_configs, + # which propagates each integration's invoke_separator class attribute). + # Deferred import of IntegrationBase avoids a circular import at module load + # (base.py itself imports CommandRegistrar lazily). 
+ from specify_cli.integrations.base import IntegrationBase # noqa: PLC0415 + + _sep = agent_config.get("invoke_separator", ".") + body = IntegrationBase.resolve_command_refs(body, _sep) + output_name = self._compute_output_name(agent_name, cmd_name, agent_config) if agent_config["extension"] == "/SKILL.md": @@ -505,12 +525,22 @@ def register_commands( project_root, ) elif agent_config["format"] == "markdown": - body = self.resolve_skill_placeholders(agent_name, frontmatter, body, project_root) - body = self._convert_argument_placeholder(body, "$ARGUMENTS", agent_config["args"]) - output = self.render_markdown_command(frontmatter, body, source_id, context_note) + body = self.resolve_skill_placeholders( + agent_name, frontmatter, body, project_root + ) + body = self._convert_argument_placeholder( + body, "$ARGUMENTS", agent_config["args"] + ) + output = self.render_markdown_command( + frontmatter, body, source_id, context_note + ) elif agent_config["format"] == "toml": - body = self.resolve_skill_placeholders(agent_name, frontmatter, body, project_root) - body = self._convert_argument_placeholder(body, "$ARGUMENTS", agent_config["args"]) + body = self.resolve_skill_placeholders( + agent_name, frontmatter, body, project_root + ) + body = self._convert_argument_placeholder( + body, "$ARGUMENTS", agent_config["args"] + ) output = self.render_toml_command(frontmatter, body, source_id) elif agent_config["format"] == "yaml": output = self.render_yaml_command( @@ -685,8 +715,11 @@ def register_commands_for_non_skill_agents( if agent_dir.exists(): try: registered = self.register_commands( - agent_name, commands, source_id, - source_dir, project_root, + agent_name, + commands, + source_id, + source_dir, + project_root, context_note=context_note, ) if registered: diff --git a/src/specify_cli/integrations/forge/__init__.py b/src/specify_cli/integrations/forge/__init__.py index a941d4c331..47a90687dc 100644 --- a/src/specify_cli/integrations/forge/__init__.py +++ 
b/src/specify_cli/integrations/forge/__init__.py @@ -87,8 +87,10 @@ class ForgeIntegration(MarkdownIntegration): "strip_frontmatter_keys": ["handoffs"], "inject_name": True, "format_name": format_forge_command_name, # Custom name formatter + "invoke_separator": "-", } context_file = "AGENTS.md" + invoke_separator = "-" def setup( self, @@ -133,6 +135,7 @@ def setup( processed = self.process_template( raw, self.key, script_type, arg_placeholder, context_file=self.context_file or "", + invoke_separator=self.invoke_separator, ) # FORGE-SPECIFIC: Ensure any remaining $ARGUMENTS placeholders are diff --git a/tests/integrations/test_integration_forge.py b/tests/integrations/test_integration_forge.py index 8cd8b17c95..62fee73210 100644 --- a/tests/integrations/test_integration_forge.py +++ b/tests/integrations/test_integration_forge.py @@ -141,6 +141,7 @@ def test_directory_structure(self, tmp_path): assert actual_commands == expected_commands def test_templates_are_processed(self, tmp_path): + import re from specify_cli.integrations.forge import ForgeIntegration forge = ForgeIntegration() m = IntegrationManifest("forge", tmp_path) @@ -157,6 +158,11 @@ def test_templates_are_processed(self, tmp_path): assert "$ARGUMENTS" not in content, f"{cmd_file.name} has unprocessed $ARGUMENTS" # Frontmatter sections should be stripped assert "\nscripts:\n" not in content + # Check Forge-specific: command references use hyphen notation, not dot notation + assert not re.search(r"/speckit\.[a-z]", content), ( + f"{cmd_file.name} contains dot-notation command reference (/speckit.); " + "Forge requires hyphen notation (/speckit-) for ZSH compatibility" + ) def test_plan_references_correct_context_file(self, tmp_path): """The generated plan command must reference forge's context file.""" @@ -224,6 +230,33 @@ def test_uses_parameters_placeholder(self, tmp_path): "checklist should contain {{parameters}} in User Input section" ) + def test_command_refs_use_hyphen_notation(self, tmp_path): + 
"""Verify all generated Forge command files use /speckit-foo, not /speckit.foo.""" + import re + from specify_cli.integrations.forge import ForgeIntegration + forge = ForgeIntegration() + m = IntegrationManifest("forge", tmp_path) + forge.setup(tmp_path, m) + commands_dir = tmp_path / ".forge" / "commands" + + files_with_refs = [] + files_with_dot_refs = [] + for cmd_file in commands_dir.glob("speckit.*.md"): + content = cmd_file.read_text(encoding="utf-8") + if re.search(r"/speckit-[a-z]", content): + files_with_refs.append(cmd_file.name) + if re.search(r"/speckit\.[a-z]", content): + files_with_dot_refs.append(cmd_file.name) + + assert files_with_dot_refs == [], ( + f"Files contain dot-notation command references: {files_with_dot_refs}. " + "Forge requires hyphen notation (/speckit-) for ZSH compatibility." + ) + assert len(files_with_refs) > 0, ( + "Expected at least one generated Forge command to contain /speckit- reference, " + "but none were found. Check that __SPECKIT_COMMAND_*__ tokens are being resolved." 
+ ) + def test_name_field_uses_hyphenated_format(self, tmp_path): """Verify that injected name fields use hyphenated format (speckit-plan, not speckit.plan).""" from specify_cli.integrations.forge import ForgeIntegration @@ -401,3 +434,48 @@ def test_registrar_does_not_affect_other_agents(self, tmp_path): assert "name:" not in content, ( "Windsurf should not inject name field - format_name callback should be Forge-only" ) + + def test_git_extension_command_uses_hyphen_notation(self, tmp_path): + """Verify the git extension's feature command uses /speckit-specify (not /speckit.specify) for Forge.""" + from pathlib import Path + from specify_cli.agents import CommandRegistrar + + # Locate the real git extension command source file + repo_root = Path(__file__).resolve().parent.parent.parent + ext_dir = repo_root / "extensions" / "git" + cmd_source = ext_dir / "commands" / "speckit.git.feature.md" + assert cmd_source.exists(), ( + f"Git extension command source not found at {cmd_source}. " + "Ensure extensions/git/commands/speckit.git.feature.md exists." + ) + + registrar = CommandRegistrar() + commands = [ + { + "name": "speckit.git.feature", + "file": "commands/speckit.git.feature.md", + } + ] + + registered = registrar.register_commands( + "forge", + commands, + "git", + ext_dir, + tmp_path, + ) + + assert "speckit.git.feature" in registered + + forge_cmd = tmp_path / ".forge" / "commands" / "speckit.git.feature.md" + assert forge_cmd.exists(), "Expected Forge command file was not created" + + content = forge_cmd.read_text(encoding="utf-8") + assert "/speckit-specify" in content, ( + "Expected '/speckit-specify' (hyphen) in generated Forge git.feature command body, " + "but it was not found. Check that __SPECKIT_COMMAND_SPECIFY__ is resolved correctly." + ) + assert "/speckit.specify" not in content, ( + "Found '/speckit.specify' (dot notation) in generated Forge git.feature command body. " + "Forge requires hyphen notation for ZSH compatibility." 
+ ) diff --git a/tests/test_agent_config_consistency.py b/tests/test_agent_config_consistency.py index 75e80fdf33..2f0fe15127 100644 --- a/tests/test_agent_config_consistency.py +++ b/tests/test_agent_config_consistency.py @@ -5,7 +5,6 @@ from specify_cli import AGENT_CONFIG, AI_ASSISTANT_ALIASES, AI_ASSISTANT_HELP from specify_cli.extensions import CommandRegistrar - REPO_ROOT = Path(__file__).resolve().parent.parent @@ -199,3 +198,88 @@ def test_goose_in_extension_registrar(self): def test_ai_help_includes_goose(self): """CLI help text for --ai should include goose.""" assert "goose" in AI_ASSISTANT_HELP + + # --- invoke_separator propagation checks --- + + def test_skills_agents_have_hyphen_invoke_separator_in_agent_configs(self): + """Skills-based agents must expose invoke_separator='-' in AGENT_CONFIGS. + + SkillsIntegration sets ``invoke_separator = "-"`` as a class attribute, + but individual skills integrations (claude, codex, …) do not repeat it in + their ``registrar_config`` dicts. ``_build_agent_configs()`` must + propagate the class attribute so that ``register_commands()`` resolves + ``__SPECKIT_COMMAND_*__`` tokens with the correct hyphen separator. + """ + cfg = CommandRegistrar.AGENT_CONFIGS + skills_agents = [ + key for key, c in cfg.items() if c.get("extension") == "/SKILL.md" + ] + assert skills_agents, ( + "Expected at least one skills-based agent in AGENT_CONFIGS" + ) + for agent in skills_agents: + assert cfg[agent].get("invoke_separator") == "-", ( + f"Skills agent '{agent}' has invoke_separator=" + f"{cfg[agent].get('invoke_separator')!r} in AGENT_CONFIGS; " + "expected '-' (propagated from SkillsIntegration.invoke_separator)" + ) + + def test_skills_agent_command_token_resolves_with_hyphen(self, tmp_path): + """__SPECKIT_COMMAND_*__ tokens in extension commands resolve to /speckit- + when registered for a skills-based agent (e.g. claude). 
+
+        Regression guard: before the fix, _build_agent_configs() did not
+        propagate invoke_separator from the integration class, so
+        register_commands() fell back to '.' and emitted /speckit.specify instead
+        of /speckit-specify for skills agents.
+        """
+        import re
+        from pathlib import Path
+
+        from specify_cli.agents import CommandRegistrar
+
+        repo_root = Path(__file__).resolve().parent.parent
+        ext_dir = repo_root / "extensions" / "git"
+        cmd_source = ext_dir / "commands" / "speckit.git.feature.md"
+        assert cmd_source.exists(), (
+            f"Git extension command source not found at {cmd_source}"
+        )
+        assert "__SPECKIT_COMMAND_SPECIFY__" in cmd_source.read_text(
+            encoding="utf-8"
+        ), (
+            "Expected __SPECKIT_COMMAND_SPECIFY__ token in speckit.git.feature.md; "
+            "check that the file uses the token rather than a hard-coded ref."
+        )
+
+        registrar = CommandRegistrar()
+        commands = [
+            {"name": "speckit.git.feature", "file": "commands/speckit.git.feature.md"}
+        ]
+
+        registered = registrar.register_commands(
+            "claude",
+            commands,
+            "git",
+            ext_dir,
+            tmp_path,
+        )
+
+        assert "speckit.git.feature" in registered
+        skill_file = (
+            tmp_path / ".claude" / "skills" / "speckit-git-feature" / "SKILL.md"
+        )
+        assert skill_file.exists(), (
+            f"Expected Claude skill file not found at {skill_file}"
+        )
+        content = skill_file.read_text(encoding="utf-8")
+        assert "/speckit-specify" in content, (
+            "Expected '/speckit-specify' (hyphen) in generated Claude skill for git.feature; "
+            "__SPECKIT_COMMAND_SPECIFY__ was not resolved with the correct separator."
+        )
+        # Negative lookbehind is unnecessary here; a plain search for the
+        # dot-notation reference suffices to prove the separator was applied.
+        assert not re.search(r"/speckit\.specify", content), (
+            "Found '/speckit.specify' (dot notation) in generated Claude skill. "
+            "Skills agents must use hyphen notation."
+ ) From b5fad5129274fe905606ec74ef03239b4b530363 Mon Sep 17 00:00:00 2001 From: Adrian Osorio Blanchard Date: Wed, 29 Apr 2026 22:31:56 -0400 Subject: [PATCH 21/26] feat(workflows): support file-backed inputs --- docs/reference/workflows.md | 13 ++- src/specify_cli/__init__.py | 105 ++++++++++++++++++-- tests/test_workflows.py | 188 ++++++++++++++++++++++++++++++++++++ 3 files changed, 291 insertions(+), 15 deletions(-) diff --git a/docs/reference/workflows.md b/docs/reference/workflows.md index e7e921e1e9..fadff19656 100644 --- a/docs/reference/workflows.md +++ b/docs/reference/workflows.md @@ -8,16 +8,19 @@ Workflows automate multi-step Spec-Driven Development processes — chaining com specify workflow run ``` -| Option | Description | -| ------------------- | -------------------------------------------------------- | -| `-i` / `--input` | Pass input values as `key=value` (repeatable) | +| Option | Description | +| ------------------- | ------------------------------------------------------------------------------------------------ | +| `-i` / `--input` | Pass workflow inputs/parameters as `key=value` (repeatable); use `key=@path` to read text files | +| `--input-file` | Load workflow inputs/parameters from a JSON object file; repeatable `--input` values override file values | -Runs a workflow from a catalog ID, URL, or local file path. Inputs declared by the workflow can be provided via `--input` or will be prompted interactively. +Runs a workflow from a catalog ID, URL, or local file path. Inputs/parameters declared by the workflow can be provided via `--input` or will be prompted interactively. 
Example: ```bash -specify workflow run speckit -i spec="Build a kanban board with drag-and-drop task management" -i scope=full +specify workflow run ./workflow.yml -i prompt="Build a workflow" -i scope=full +specify workflow run ./workflow.yml --input prompt=@docs/prompt.md +specify workflow run ./workflow.yml --input-file payload.json -i scope=full ``` > **Note:** All workflow commands require a project already initialized with `specify init`. diff --git a/src/specify_cli/__init__.py b/src/specify_cli/__init__.py index 176eecc2d4..d08335c49b 100644 --- a/src/specify_cli/__init__.py +++ b/src/specify_cli/__init__.py @@ -5257,11 +5257,100 @@ def extension_set_priority( workflow_app.add_typer(workflow_catalog_app, name="catalog") +def _resolve_workflow_cli_path(raw_path: str) -> Path: + """Resolve workflow CLI file paths from the current working directory.""" + path = Path(raw_path).expanduser() + if not path.is_absolute(): + path = Path.cwd() / path + return path + + +def _read_workflow_cli_file(raw_path: str, description: str) -> tuple[Path, str]: + """Read a text file referenced by a workflow CLI input option.""" + cleaned_path = raw_path.strip() + if not cleaned_path: + raise ValueError(f"Missing file path for {description}.") + + path = _resolve_workflow_cli_path(cleaned_path) + if not path.exists(): + raise ValueError(f"File for {description} not found: {path}") + if not path.is_file(): + raise ValueError(f"Path for {description} is not a file: {path}") + + try: + return path, path.read_text(encoding="utf-8") + except UnicodeDecodeError as exc: + raise ValueError( + f"Unable to read file for {description} as UTF-8 text: {path}" + ) from exc + except OSError as exc: + raise ValueError( + f"Unable to read file for {description}: {path} ({exc})" + ) from exc + + +def _load_workflow_input_file(input_file: str) -> dict[str, Any]: + """Load workflow inputs from a JSON object file.""" + path, raw_json = _read_workflow_cli_file(input_file, "--input-file") + try: + data 
= json.loads(raw_json) + except json.JSONDecodeError as exc: + raise ValueError( + f"Invalid JSON in --input-file {path}: " + f"{exc.msg} at line {exc.lineno}, column {exc.colno}" + ) from exc + + if not isinstance(data, dict): + raise ValueError( + f"--input-file must contain a JSON object, got {type(data).__name__}." + ) + return data + + +def _parse_workflow_inputs( + input_values: list[str] | None, + input_file: str | None, +) -> dict[str, Any]: + """Normalize workflow CLI input options into the engine input dict.""" + inputs: dict[str, Any] = {} + + if input_file is not None: + inputs.update(_load_workflow_input_file(input_file)) + + if input_values: + for kv in input_values: + if "=" not in kv: + raise ValueError( + f"Invalid input format: {kv!r} (expected key=value)" + ) + key, _, raw_value = kv.partition("=") + key = key.strip() + if not key: + raise ValueError( + f"Invalid input format: {kv!r} (key cannot be empty)" + ) + + value = raw_value.strip() + if value.startswith("@"): + file_ref = value[1:].strip() + if file_ref and _resolve_workflow_cli_path(file_ref).exists(): + _, value = _read_workflow_cli_file(file_ref, f"input {key!r}") + inputs[key] = value + + return inputs + + @workflow_app.command("run") def workflow_run( source: str = typer.Argument(..., help="Workflow ID or YAML file path"), input_values: list[str] | None = typer.Option( - None, "--input", "-i", help="Input values as key=value pairs" + None, + "--input", + "-i", + help="Input values as key=value pairs; use key=@path to read a text file", + ), + input_file: str | None = typer.Option( + None, "--input-file", help="Load input values from a JSON object file" ), ): """Run a workflow from an installed ID or local YAML path.""" @@ -5288,15 +5377,11 @@ def workflow_run( console.print(f" • {err}") raise typer.Exit(1) - # Parse inputs - inputs: dict[str, Any] = {} - if input_values: - for kv in input_values: - if "=" not in kv: - console.print(f"[red]Error:[/red] Invalid input format: {kv!r} 
(expected key=value)") - raise typer.Exit(1) - key, _, value = kv.partition("=") - inputs[key.strip()] = value.strip() + try: + inputs = _parse_workflow_inputs(input_values, input_file) + except ValueError as exc: + console.print(f"[red]Error:[/red] {exc}") + raise typer.Exit(1) console.print(f"\n[bold cyan]Running workflow:[/bold cyan] {definition.name} ({definition.id})") console.print(f"[dim]Version: {definition.version}[/dim]\n") diff --git a/tests/test_workflows.py b/tests/test_workflows.py index 4c042fc7d5..d4c4d4b302 100644 --- a/tests/test_workflows.py +++ b/tests/test_workflows.py @@ -84,6 +84,194 @@ def sample_workflow_file(project_dir, sample_workflow_yaml): return wf_path +# ===== Workflow CLI Input Tests ===== + +class TestWorkflowCliInputs: + """Test workflow run input normalization at the CLI boundary.""" + + def test_inline_input_still_works(self, project_dir, monkeypatch): + from specify_cli import _parse_workflow_inputs + + monkeypatch.chdir(project_dir) + + inputs = _parse_workflow_inputs( + ["spec=Build a kanban board", "scope=full"], + None, + ) + + assert inputs == { + "spec": "Build a kanban board", + "scope": "full", + } + + def test_at_file_input_reads_file_contents_for_generic_key( + self, + project_dir, + monkeypatch, + ): + from specify_cli import _parse_workflow_inputs + + desc_file = project_dir / "desc.md" + desc_text = "# Description\n\nBuild a workflow.\n" + desc_file.write_text(desc_text, encoding="utf-8") + monkeypatch.chdir(project_dir) + + inputs = _parse_workflow_inputs(["description=@desc.md"], None) + + assert inputs == {"description": desc_text} + + @pytest.mark.parametrize("literal", ["@alice", "@"]) + def test_missing_at_file_stays_literal(self, literal, project_dir, monkeypatch): + from specify_cli import _parse_workflow_inputs + + monkeypatch.chdir(project_dir) + + inputs = _parse_workflow_inputs([f"assignee={literal}"], None) + + assert inputs == {"assignee": literal} + + def test_missing_input_file_fails_cleanly(self, 
project_dir, monkeypatch): + from specify_cli import _parse_workflow_inputs + + monkeypatch.chdir(project_dir) + + with pytest.raises(ValueError, match="not found"): + _parse_workflow_inputs(None, "missing.json") + + def test_input_file_loads_json_object(self, project_dir, monkeypatch): + from specify_cli import _parse_workflow_inputs + + payload_file = project_dir / "payload.json" + payload_file.write_text( + json.dumps({"prompt": "Build a workflow", "scope": "full"}), + encoding="utf-8", + ) + monkeypatch.chdir(project_dir) + + inputs = _parse_workflow_inputs(None, "payload.json") + + assert inputs == { + "prompt": "Build a workflow", + "scope": "full", + } + + def test_direct_input_overrides_input_file(self, project_dir, monkeypatch): + from specify_cli import _parse_workflow_inputs + + payload_file = project_dir / "payload.json" + payload_file.write_text( + json.dumps({"prompt": "Build a workflow", "scope": "full"}), + encoding="utf-8", + ) + monkeypatch.chdir(project_dir) + + inputs = _parse_workflow_inputs(["scope=minimal"], "payload.json") + + assert inputs == { + "prompt": "Build a workflow", + "scope": "minimal", + } + + def test_invalid_json_input_file_fails_cleanly(self, project_dir, monkeypatch): + from specify_cli import _parse_workflow_inputs + + payload_file = project_dir / "payload.json" + payload_file.write_text("{invalid json", encoding="utf-8") + monkeypatch.chdir(project_dir) + + with pytest.raises(ValueError, match="Invalid JSON"): + _parse_workflow_inputs(None, "payload.json") + + @pytest.mark.parametrize("payload", ["[]", '"not an object"']) + def test_non_object_json_input_file_fails_cleanly( + self, + payload, + project_dir, + monkeypatch, + ): + from specify_cli import _parse_workflow_inputs + + payload_file = project_dir / "payload.json" + payload_file.write_text(payload, encoding="utf-8") + monkeypatch.chdir(project_dir) + + with pytest.raises(ValueError, match="JSON object"): + _parse_workflow_inputs(None, "payload.json") + + def 
test_malformed_inline_input_fails_cleanly(self): + from specify_cli import _parse_workflow_inputs + + with pytest.raises(ValueError, match="expected key=value"): + _parse_workflow_inputs(["spec"], None) + + def test_workflow_run_passes_normalized_inputs_to_engine( + self, + project_dir, + monkeypatch, + ): + from typer.testing import CliRunner + from specify_cli import app + from specify_cli.workflows import engine as engine_module + + payload_file = project_dir / "payload.json" + payload_file.write_text( + json.dumps({"spec": "Build a kanban board", "scope": "minimal"}), + encoding="utf-8", + ) + captured: dict[str, object] = {} + + class FakeDefinition: + id = "speckit" + name = "Spec Kit" + version = "1.0.0" + + class FakeStatus: + value = "completed" + + class FakeState: + status = FakeStatus() + run_id = "run-1" + + class FakeWorkflowEngine: + def __init__(self, project_root): + self.project_root = project_root + self.on_step_start = None + + def load_workflow(self, source): + captured["source"] = source + return FakeDefinition() + + def validate(self, definition): + return [] + + def execute(self, definition, inputs): + captured["inputs"] = inputs + return FakeState() + + monkeypatch.setattr(engine_module, "WorkflowEngine", FakeWorkflowEngine) + monkeypatch.chdir(project_dir) + + result = CliRunner().invoke( + app, + [ + "workflow", + "run", + "speckit", + "--input-file", + "payload.json", + "--input", + "scope=full", + ], + ) + + assert result.exit_code == 0, result.output + assert captured["source"] == "speckit" + assert captured["inputs"] == { + "spec": "Build a kanban board", + "scope": "full", + } + + # ===== Step Registry Tests ===== class TestStepRegistry: From c38fab22e55898f0b2add8c7aa0bcaaa578217cb Mon Sep 17 00:00:00 2001 From: Adrian Osorio Blanchard Date: Thu, 30 Apr 2026 00:42:26 -0400 Subject: [PATCH 22/26] Fix workflow @input directory handling --- src/specify_cli/__init__.py | 8 ++++++-- tests/test_workflows.py | 21 +++++++++++++++++++++ 2 
files changed, 27 insertions(+), 2 deletions(-) diff --git a/src/specify_cli/__init__.py b/src/specify_cli/__init__.py index d08335c49b..3f4883a727 100644 --- a/src/specify_cli/__init__.py +++ b/src/specify_cli/__init__.py @@ -5333,8 +5333,12 @@ def _parse_workflow_inputs( value = raw_value.strip() if value.startswith("@"): file_ref = value[1:].strip() - if file_ref and _resolve_workflow_cli_path(file_ref).exists(): - _, value = _read_workflow_cli_file(file_ref, f"input {key!r}") + if file_ref: + candidate_path = _resolve_workflow_cli_path(file_ref) + if candidate_path.exists() and candidate_path.is_file(): + _, value = _read_workflow_cli_file( + file_ref, f"input {key!r}" + ) inputs[key] = value return inputs diff --git a/tests/test_workflows.py b/tests/test_workflows.py index d4c4d4b302..58db0ba508 100644 --- a/tests/test_workflows.py +++ b/tests/test_workflows.py @@ -130,6 +130,18 @@ def test_missing_at_file_stays_literal(self, literal, project_dir, monkeypatch): assert inputs == {"assignee": literal} + def test_existing_at_directory_stays_literal(self, project_dir, monkeypatch): + from specify_cli import _parse_workflow_inputs + + (project_dir / "some_existing_directory").mkdir() + monkeypatch.chdir(project_dir) + + assert _parse_workflow_inputs(["x=@."], None) == {"x": "@."} + assert _parse_workflow_inputs( + ["x=@some_existing_directory"], + None, + ) == {"x": "@some_existing_directory"} + def test_missing_input_file_fails_cleanly(self, project_dir, monkeypatch): from specify_cli import _parse_workflow_inputs @@ -138,6 +150,15 @@ def test_missing_input_file_fails_cleanly(self, project_dir, monkeypatch): with pytest.raises(ValueError, match="not found"): _parse_workflow_inputs(None, "missing.json") + def test_input_file_directory_fails_cleanly(self, project_dir, monkeypatch): + from specify_cli import _parse_workflow_inputs + + (project_dir / "payload.json").mkdir() + monkeypatch.chdir(project_dir) + + with pytest.raises(ValueError, match="not a file"): + 
_parse_workflow_inputs(None, "payload.json") + def test_input_file_loads_json_object(self, project_dir, monkeypatch): from specify_cli import _parse_workflow_inputs From c5b154939a9c93553632f098d3bb8b9b406795ef Mon Sep 17 00:00:00 2001 From: Adrian Osorio Blanchard Date: Thu, 30 Apr 2026 00:51:56 -0400 Subject: [PATCH 23/26] Clarify workflow run source docs --- docs/reference/workflows.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/reference/workflows.md b/docs/reference/workflows.md index fadff19656..086b559da4 100644 --- a/docs/reference/workflows.md +++ b/docs/reference/workflows.md @@ -13,7 +13,7 @@ specify workflow run | `-i` / `--input` | Pass workflow inputs/parameters as `key=value` (repeatable); use `key=@path` to read text files | | `--input-file` | Load workflow inputs/parameters from a JSON object file; repeatable `--input` values override file values | -Runs a workflow from a catalog ID, URL, or local file path. Inputs/parameters declared by the workflow can be provided via `--input` or will be prompted interactively. +Runs a workflow from an installed workflow ID or a local `.yml`/`.yaml` file path. Inputs/parameters declared by the workflow can be provided via `--input` or will be prompted interactively. 
Example: From eaa519507bc95d2da5ce504cda29e1a9f32a9ef4 Mon Sep 17 00:00:00 2001 From: Adrian Osorio Blanchard Date: Thu, 30 Apr 2026 01:01:49 -0400 Subject: [PATCH 24/26] Clarify workflow input file reference docs --- docs/reference/workflows.md | 4 ++-- src/specify_cli/__init__.py | 5 ++++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/docs/reference/workflows.md b/docs/reference/workflows.md index 086b559da4..e350aecdb0 100644 --- a/docs/reference/workflows.md +++ b/docs/reference/workflows.md @@ -10,10 +10,10 @@ specify workflow run | Option | Description | | ------------------- | ------------------------------------------------------------------------------------------------ | -| `-i` / `--input` | Pass workflow inputs/parameters as `key=value` (repeatable); use `key=@path` to read text files | +| `-i` / `--input` | Pass workflow inputs/parameters as `key=value` (repeatable); `key=@path` reads an existing text file, otherwise `@` values stay literal | | `--input-file` | Load workflow inputs/parameters from a JSON object file; repeatable `--input` values override file values | -Runs a workflow from an installed workflow ID or a local `.yml`/`.yaml` file path. Inputs/parameters declared by the workflow can be provided via `--input` or will be prompted interactively. +Runs a workflow from an installed workflow ID or a local `.yml`/`.yaml` file path. Inputs/parameters declared by the workflow can be provided via `--input` or `--input-file`, or will be prompted interactively. 
Example: diff --git a/src/specify_cli/__init__.py b/src/specify_cli/__init__.py index 3f4883a727..85f9541180 100644 --- a/src/specify_cli/__init__.py +++ b/src/specify_cli/__init__.py @@ -5351,7 +5351,10 @@ def workflow_run( None, "--input", "-i", - help="Input values as key=value pairs; use key=@path to read a text file", + help=( + "Input values as key=value pairs; key=@path reads an existing text " + "file, otherwise @ values stay literal" + ), ), input_file: str | None = typer.Option( None, "--input-file", help="Load input values from a JSON object file" From 6f4d525164891f0b3b5dc94162c30fce8766bbef Mon Sep 17 00:00:00 2001 From: Adrian Osorio Blanchard Date: Mon, 4 May 2026 11:06:04 -0400 Subject: [PATCH 25/26] Address workflow file input review feedback --- docs/reference/workflows.md | 4 +- src/specify_cli/__init__.py | 61 ++++++++++++++++++++- tests/test_workflows.py | 105 ++++++++++++++++++++++++++++++++++++ 3 files changed, 167 insertions(+), 3 deletions(-) diff --git a/docs/reference/workflows.md b/docs/reference/workflows.md index e350aecdb0..2b02ff3e1b 100644 --- a/docs/reference/workflows.md +++ b/docs/reference/workflows.md @@ -11,7 +11,7 @@ specify workflow run | Option | Description | | ------------------- | ------------------------------------------------------------------------------------------------ | | `-i` / `--input` | Pass workflow inputs/parameters as `key=value` (repeatable); `key=@path` reads an existing text file, otherwise `@` values stay literal | -| `--input-file` | Load workflow inputs/parameters from a JSON object file; repeatable `--input` values override file values | +| `--input-file` | Load workflow inputs/parameters from a JSON object file with string, number, or boolean values; repeatable `--input` values override file values | Runs a workflow from an installed workflow ID or a local `.yml`/`.yaml` file path. 
Inputs/parameters declared by the workflow can be provided via `--input` or `--input-file`, or will be prompted interactively. @@ -23,6 +23,8 @@ specify workflow run ./workflow.yml --input prompt=@docs/prompt.md specify workflow run ./workflow.yml --input-file payload.json -i scope=full ``` +For boolean, number, and enum-constrained inputs, surrounding whitespace from file-backed string values is trimmed before normal workflow input coercion. Free-form string inputs preserve file contents. + > **Note:** All workflow commands require a project already initialized with `specify init`. ## Resume a Workflow diff --git a/src/specify_cli/__init__.py b/src/specify_cli/__init__.py index 85f9541180..588ba5009c 100644 --- a/src/specify_cli/__init__.py +++ b/src/specify_cli/__init__.py @@ -34,6 +34,7 @@ import shutil import json import json5 +import math import stat import shlex import urllib.error @@ -5289,6 +5290,36 @@ def _read_workflow_cli_file(raw_path: str, description: str) -> tuple[Path, str] ) from exc +def _json_type_name(value: Any) -> str: + """Return a user-facing JSON type name for validation errors.""" + if value is None: + return "null" + if isinstance(value, dict): + return "object" + if isinstance(value, list): + return "array" + if isinstance(value, bool): + return "boolean" + if isinstance(value, (int, float)): + return "number" + if isinstance(value, str): + return "string" + return type(value).__name__ + + +def _validate_workflow_input_file_value(key: str, value: Any) -> None: + """Ensure --input-file values match the supported workflow input scalars.""" + if isinstance(value, float) and not math.isfinite(value): + raise ValueError( + f"--input-file value for {key!r} must be a finite number." + ) + if not isinstance(value, (str, int, float, bool)): + raise ValueError( + f"--input-file value for {key!r} must be a string, number, " + f"or boolean, got {_json_type_name(value)}." 
+ ) + + def _load_workflow_input_file(input_file: str) -> dict[str, Any]: """Load workflow inputs from a JSON object file.""" path, raw_json = _read_workflow_cli_file(input_file, "--input-file") @@ -5304,18 +5335,40 @@ def _load_workflow_input_file(input_file: str) -> dict[str, Any]: raise ValueError( f"--input-file must contain a JSON object, got {type(data).__name__}." ) + for key, value in data.items(): + _validate_workflow_input_file_value(str(key), value) return data +def _normalize_workflow_cli_scalar( + value: Any, + input_def: dict[str, Any] | None, +) -> Any: + """Normalize file-backed scalars when workflow coercion expects scalars.""" + if not isinstance(value, str) or not isinstance(input_def, dict): + return value + + input_type = input_def.get("type", "string") + if input_type in ("number", "boolean") or input_def.get("enum") is not None: + return value.strip() + return value + + def _parse_workflow_inputs( input_values: list[str] | None, input_file: str | None, + input_definitions: dict[str, Any] | None = None, ) -> dict[str, Any]: """Normalize workflow CLI input options into the engine input dict.""" inputs: dict[str, Any] = {} + input_definitions = input_definitions or {} if input_file is not None: - inputs.update(_load_workflow_input_file(input_file)) + for key, value in _load_workflow_input_file(input_file).items(): + inputs[key] = _normalize_workflow_cli_scalar( + value, + input_definitions.get(key), + ) if input_values: for kv in input_values: @@ -5339,6 +5392,10 @@ def _parse_workflow_inputs( _, value = _read_workflow_cli_file( file_ref, f"input {key!r}" ) + value = _normalize_workflow_cli_scalar( + value, + input_definitions.get(key), + ) inputs[key] = value return inputs @@ -5385,7 +5442,7 @@ def workflow_run( raise typer.Exit(1) try: - inputs = _parse_workflow_inputs(input_values, input_file) + inputs = _parse_workflow_inputs(input_values, input_file, definition.inputs) except ValueError as exc: console.print(f"[red]Error:[/red] {exc}") 
raise typer.Exit(1) diff --git a/tests/test_workflows.py b/tests/test_workflows.py index 58db0ba508..1128aacf36 100644 --- a/tests/test_workflows.py +++ b/tests/test_workflows.py @@ -120,6 +120,34 @@ def test_at_file_input_reads_file_contents_for_generic_key( assert inputs == {"description": desc_text} + def test_at_file_input_normalizes_typed_scalars( + self, + project_dir, + monkeypatch, + ): + from specify_cli import _parse_workflow_inputs + + (project_dir / "enabled.txt").write_text("true\n", encoding="utf-8") + (project_dir / "scope.txt").write_text("full\n", encoding="utf-8") + (project_dir / "notes.md").write_text("line one\n", encoding="utf-8") + monkeypatch.chdir(project_dir) + + inputs = _parse_workflow_inputs( + ["enabled=@enabled.txt", "scope=@scope.txt", "notes=@notes.md"], + None, + { + "enabled": {"type": "boolean"}, + "scope": {"type": "string", "enum": ["full", "minimal"]}, + "notes": {"type": "string"}, + }, + ) + + assert inputs == { + "enabled": "true", + "scope": "full", + "notes": "line one\n", + } + @pytest.mark.parametrize("literal", ["@alice", "@"]) def test_missing_at_file_stays_literal(self, literal, project_dir, monkeypatch): from specify_cli import _parse_workflow_inputs @@ -176,6 +204,40 @@ def test_input_file_loads_json_object(self, project_dir, monkeypatch): "scope": "full", } + def test_input_file_normalizes_typed_string_scalars( + self, + project_dir, + monkeypatch, + ): + from specify_cli import _parse_workflow_inputs + + payload_file = project_dir / "payload.json" + payload_file.write_text( + json.dumps({ + "enabled": "true\n", + "scope": "full\n", + "prompt": "Keep trailing newline\n", + }), + encoding="utf-8", + ) + monkeypatch.chdir(project_dir) + + inputs = _parse_workflow_inputs( + None, + "payload.json", + { + "enabled": {"type": "boolean"}, + "scope": {"type": "string", "enum": ["full", "minimal"]}, + "prompt": {"type": "string"}, + }, + ) + + assert inputs == { + "enabled": "true", + "scope": "full", + "prompt": "Keep 
trailing newline\n", + } + def test_direct_input_overrides_input_file(self, project_dir, monkeypatch): from specify_cli import _parse_workflow_inputs @@ -219,6 +281,48 @@ def test_non_object_json_input_file_fails_cleanly( with pytest.raises(ValueError, match="JSON object"): _parse_workflow_inputs(None, "payload.json") + @pytest.mark.parametrize( + "payload", + [ + {"spec": {"text": "Build a workflow"}}, + {"spec": ["Build a workflow"]}, + {"spec": None}, + ], + ) + def test_non_scalar_json_input_file_values_fail_cleanly( + self, + payload, + project_dir, + monkeypatch, + ): + from specify_cli import _parse_workflow_inputs + + payload_file = project_dir / "payload.json" + payload_file.write_text(json.dumps(payload), encoding="utf-8") + monkeypatch.chdir(project_dir) + + with pytest.raises(ValueError, match="string, number, or boolean"): + _parse_workflow_inputs(None, "payload.json") + + @pytest.mark.parametrize( + "payload", + ['{"spec": NaN}', '{"spec": Infinity}', '{"spec": 1e999}'], + ) + def test_non_finite_json_input_file_numbers_fail_cleanly( + self, + payload, + project_dir, + monkeypatch, + ): + from specify_cli import _parse_workflow_inputs + + payload_file = project_dir / "payload.json" + payload_file.write_text(payload, encoding="utf-8") + monkeypatch.chdir(project_dir) + + with pytest.raises(ValueError, match="finite number"): + _parse_workflow_inputs(None, "payload.json") + def test_malformed_inline_input_fails_cleanly(self): from specify_cli import _parse_workflow_inputs @@ -245,6 +349,7 @@ class FakeDefinition: id = "speckit" name = "Spec Kit" version = "1.0.0" + inputs = {} class FakeStatus: value = "completed" From f027c01e6bf0c188b3830b60b21861ab2c6c65b4 Mon Sep 17 00:00:00 2001 From: Adrian Osorio Blanchard Date: Wed, 6 May 2026 13:57:26 -0400 Subject: [PATCH 26/26] fix(workflows): use JSON type names for input-file errors --- src/specify_cli/__init__.py | 17 ++++-- tests/test_workflows.py | 113 ++++++++++++++++++++++++++++++++++++ 2 files 
changed, 126 insertions(+), 4 deletions(-) diff --git a/src/specify_cli/__init__.py b/src/specify_cli/__init__.py index 588ba5009c..4732153561 100644 --- a/src/specify_cli/__init__.py +++ b/src/specify_cli/__init__.py @@ -5320,6 +5320,13 @@ def _validate_workflow_input_file_value(key: str, value: Any) -> None: ) +def _workflow_cli_scalar_to_string(value: Any) -> str: + """Render JSON scalars the same way repeated --input key=value does.""" + if isinstance(value, bool): + return "true" if value else "false" + return str(value) + + def _load_workflow_input_file(input_file: str) -> dict[str, Any]: """Load workflow inputs from a JSON object file.""" path, raw_json = _read_workflow_cli_file(input_file, "--input-file") @@ -5333,7 +5340,7 @@ def _load_workflow_input_file(input_file: str) -> dict[str, Any]: if not isinstance(data, dict): raise ValueError( - f"--input-file must contain a JSON object, got {type(data).__name__}." + f"--input-file must contain a JSON object, got {_json_type_name(data)}." 
) for key, value in data.items(): _validate_workflow_input_file_value(str(key), value) @@ -5366,7 +5373,7 @@ def _parse_workflow_inputs( if input_file is not None: for key, value in _load_workflow_input_file(input_file).items(): inputs[key] = _normalize_workflow_cli_scalar( - value, + _workflow_cli_scalar_to_string(value), input_definitions.get(key), ) @@ -5384,7 +5391,9 @@ def _parse_workflow_inputs( ) value = raw_value.strip() - if value.startswith("@"): + if value.startswith("@@"): + value = value[1:] + elif value.startswith("@"): file_ref = value[1:].strip() if file_ref: candidate_path = _resolve_workflow_cli_path(file_ref) @@ -5410,7 +5419,7 @@ def workflow_run( "-i", help=( "Input values as key=value pairs; key=@path reads an existing text " - "file, otherwise @ values stay literal" + "file, key=@@value passes a literal @value" ), ), input_file: str | None = typer.Option( diff --git a/tests/test_workflows.py b/tests/test_workflows.py index 1128aacf36..3ec0f94815 100644 --- a/tests/test_workflows.py +++ b/tests/test_workflows.py @@ -89,6 +89,34 @@ def sample_workflow_file(project_dir, sample_workflow_yaml): class TestWorkflowCliInputs: """Test workflow run input normalization at the CLI boundary.""" + @staticmethod + def _install_parse_only_workflow_engine(monkeypatch, inputs=None): + from specify_cli.workflows import engine as engine_module + + input_definitions = inputs or {} + + class FakeDefinition: + id = "speckit" + name = "Spec Kit" + version = "1.0.0" + inputs = input_definitions + + class FakeWorkflowEngine: + def __init__(self, project_root): + self.project_root = project_root + self.on_step_start = None + + def load_workflow(self, source): + return FakeDefinition() + + def validate(self, definition): + return [] + + def execute(self, definition, parsed_inputs): + raise AssertionError("workflow should not execute after input errors") + + monkeypatch.setattr(engine_module, "WorkflowEngine", FakeWorkflowEngine) + def test_inline_input_still_works(self, 
project_dir, monkeypatch): from specify_cli import _parse_workflow_inputs @@ -158,6 +186,19 @@ def test_missing_at_file_stays_literal(self, literal, project_dir, monkeypatch): assert inputs == {"assignee": literal} + def test_escaped_at_file_stays_literal(self, project_dir, monkeypatch): + from specify_cli import _parse_workflow_inputs + + (project_dir / "alice").write_text("file contents", encoding="utf-8") + monkeypatch.chdir(project_dir) + + assert _parse_workflow_inputs(["assignee=@@alice"], None) == { + "assignee": "@alice", + } + assert _parse_workflow_inputs(["assignee=@@"], None) == { + "assignee": "@", + } + def test_existing_at_directory_stays_literal(self, project_dir, monkeypatch): from specify_cli import _parse_workflow_inputs @@ -204,6 +245,42 @@ def test_input_file_loads_json_object(self, project_dir, monkeypatch): "scope": "full", } + def test_input_file_stringifies_json_scalars(self, project_dir, monkeypatch): + from specify_cli import _parse_workflow_inputs + + payload_file = project_dir / "payload.json" + payload_file.write_text( + json.dumps({ + "enabled": True, + "disabled": False, + "count": 3, + "ratio": 1.5, + "prompt": "Build a workflow", + }), + encoding="utf-8", + ) + monkeypatch.chdir(project_dir) + + inputs = _parse_workflow_inputs( + None, + "payload.json", + { + "enabled": {"type": "string"}, + "disabled": {"type": "string"}, + "count": {"type": "string"}, + "ratio": {"type": "string"}, + "prompt": {"type": "string"}, + }, + ) + + assert inputs == { + "enabled": "true", + "disabled": "false", + "count": "3", + "ratio": "1.5", + "prompt": "Build a workflow", + } + def test_input_file_normalizes_typed_string_scalars( self, project_dir, @@ -397,6 +474,42 @@ def execute(self, definition, inputs): "scope": "full", } + @pytest.mark.parametrize( + ("args", "payload", "expected"), + [ + (["--input-file", "missing.json"], None, "not found"), + (["--input-file", "payload.json"], "{invalid json", "Invalid JSON"), + (["--input-file", 
"payload.json"], "[]", "JSON object"), + ( + ["--input-file", "payload.json"], + json.dumps({"spec": {"text": "Build a workflow"}}), + "string, number, or boolean", + ), + (["--input-file", "payload.json"], '{"spec": NaN}', "finite number"), + (["--input", "spec"], None, "expected key=value"), + ], + ) + def test_workflow_run_input_errors_exit_cleanly( + self, + args, + payload, + expected, + project_dir, + monkeypatch, + ): + from typer.testing import CliRunner + from specify_cli import app + + if payload is not None: + (project_dir / "payload.json").write_text(payload, encoding="utf-8") + self._install_parse_only_workflow_engine(monkeypatch) + monkeypatch.chdir(project_dir) + + result = CliRunner().invoke(app, ["workflow", "run", "speckit", *args]) + + assert result.exit_code == 1, result.output + assert expected in result.output + # ===== Step Registry Tests =====