diff --git a/AGENTS.md b/AGENTS.md index abaed54..2b35ac6 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -17,7 +17,7 @@ g is a lightweight CLI wrapper that proxies to the current directory's VCS comma Key features: - Detects VCS by walking parent directories and mapping `.git`, `.svn`, or `.hg` -- Proxies CLI arguments directly to the detected VCS binary +- Proxies CLI arguments to the detected VCS binary (--version/-V is handled by g) - Minimal surface area: primary logic lives in `src/g/__init__.py` - Test fixtures cover CLI behavior for both repo and non-repo directories diff --git a/CHANGES b/CHANGES index 59bbebf..0498301 100644 --- a/CHANGES +++ b/CHANGES @@ -33,7 +33,25 @@ $ uvx --from 'g' --prerelease allow g _Notes on upcoming releases will be added here_ - +### CLI + +- `g --version` and `g -V` now display g's version instead of being passed to + the underlying VCS (#46) + + This makes it easier to check which version of g you have installed: + + ```console + $ g --version + g 0.0.10 + ``` + +### Documentation + +- Add [CLI documentation page](https://g.git-pull.com/cli/) with complete + command reference (#46) + +- CLI argument documentation now has linkable anchors (headerlinks) for easy + sharing and reference (#46) ## g 0.0.9 (2026-01-24) diff --git a/docs/_ext/__init__.py b/docs/_ext/__init__.py new file mode 100644 index 0000000..2dea8b9 --- /dev/null +++ b/docs/_ext/__init__.py @@ -0,0 +1,3 @@ +"""Sphinx extensions for g documentation.""" + +from __future__ import annotations diff --git a/docs/_ext/argparse_exemplar.py b/docs/_ext/argparse_exemplar.py new file mode 100644 index 0000000..3ad2847 --- /dev/null +++ b/docs/_ext/argparse_exemplar.py @@ -0,0 +1,1318 @@ +"""Transform argparse epilog "examples:" definition lists into documentation sections. 
+ +This Sphinx extension post-processes sphinx_argparse_neo output to convert +specially-formatted "examples:" definition lists in argparse epilogs into +proper documentation sections with syntax-highlighted code blocks. + +The extension is designed to be generic and reusable across different projects. +All behavior can be customized via Sphinx configuration options. + +Purpose +------- +When documenting CLI tools with argparse, it's useful to include examples in +the epilog. This extension recognizes a specific definition list format and +transforms it into structured documentation sections that appear in the TOC. + +Input Format +------------ +Format your argparse epilog with definition lists where terms end with "examples:": + +.. code-block:: python + + parser = argparse.ArgumentParser( + epilog=textwrap.dedent(''' + examples: + myapp sync + myapp sync myrepo + + Machine-readable output examples: + myapp sync --json + myapp sync -F json myrepo + '''), + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + +The epilog text will be parsed as a definition list by docutils, with: +- Terms: "examples:", "Machine-readable output examples:", etc. +- Definitions: The example commands (one per line) + +Output +------ +The extension transforms these into proper sections: + +- A base "examples:" term creates an "Examples" section +- Category-prefixed terms like "Machine-readable output examples:" create + subsections nested under the parent Examples section +- Each command line becomes a syntax-highlighted console code block + +Configuration +------------- +Configure via conf.py. All options have sensible defaults. + +**Term Detection:** + +``argparse_examples_term_suffix`` : str (default: "examples") + Term must end with this string to be treated as an examples header. + +``argparse_examples_base_term`` : str (default: "examples") + Exact match for the base examples section (case-insensitive). 
+ +``argparse_examples_section_title`` : str (default: "Examples") + Title used for the base examples section. + +**Usage Detection:** + +``argparse_usage_pattern`` : str (default: "usage:") + Text must start with this to be treated as a usage block (case-insensitive). + +**Code Block Formatting:** + +``argparse_examples_command_prefix`` : str (default: "$ ") + Prefix added to each command line in examples code blocks. + +``argparse_examples_code_language`` : str (default: "console") + Language identifier for examples code blocks. + +``argparse_examples_code_classes`` : list[str] (default: ["highlight-console"]) + CSS classes added to examples code blocks. + +``argparse_usage_code_language`` : str (default: "cli-usage") + Language identifier for usage blocks. + +**Behavior:** + +``argparse_reorder_usage_before_examples`` : bool (default: True) + Whether to reorder nodes so usage appears before examples. + +Additional Features +------------------- +- Removes ANSI escape codes (useful when FORCE_COLOR is set) +- Applies syntax highlighting to usage blocks +- Reorders sections so usage appears before examples in the output +- Extracts sections from argparse_program containers for TOC visibility + +Project-Specific Setup +---------------------- +Projects using this extension should register their own lexers and CSS in +their conf.py setup() function. For example:: + + def setup(app): + from my_lexer import MyLexer + app.add_lexer("my-output", MyLexer) + app.add_css_file("css/my-highlight.css") +""" + +from __future__ import annotations + +import dataclasses +import typing as t + +from docutils import nodes +from sphinx_argparse_neo.directive import ArgparseDirective +from sphinx_argparse_neo.utils import strip_ansi + +if t.TYPE_CHECKING: + import sphinx.config + from sphinx.application import Sphinx + + +@dataclasses.dataclass +class ExemplarConfig: + """Configuration for argparse_exemplar transformation. 
+ + This dataclass provides all configurable options for the argparse_exemplar + extension. Functions accept an optional config parameter with a factory + default, allowing them to work standalone with defaults or accept custom + config for full control. + + Attributes + ---------- + examples_term_suffix : str + Term must end with this string (case-insensitive) to be treated as an + examples header. Default: "examples". + examples_base_term : str + Exact match (case-insensitive, after stripping ":") for the base + examples section. Default: "examples". + examples_section_title : str + Title used for the base examples section. Default: "Examples". + usage_pattern : str + Text must start with this string (case-insensitive, after stripping + whitespace) to be treated as a usage block. Default: "usage:". + command_prefix : str + Prefix added to each command line in examples code blocks. + Default: "$ ". + code_language : str + Language identifier for examples code blocks. Default: "console". + code_classes : tuple[str, ...] + CSS classes added to examples code blocks. + Default: ("highlight-console",). + usage_code_language : str + Language identifier for usage blocks. Default: "cli-usage". + reorder_usage_before_examples : bool + Whether to reorder nodes so usage appears before examples. + Default: True. + + Examples + -------- + Using default configuration: + + >>> config = ExemplarConfig() + >>> config.examples_term_suffix + 'examples' + >>> config.command_prefix + '$ ' + + Custom configuration: + + >>> config = ExemplarConfig( + ... command_prefix="> ", + ... code_language="bash", + ... 
) + >>> config.command_prefix + '> ' + >>> config.code_language + 'bash' + """ + + # Term detection + examples_term_suffix: str = "examples" + examples_base_term: str = "examples" + examples_section_title: str = "Examples" + + # Usage detection + usage_pattern: str = "usage:" + + # Code block formatting + command_prefix: str = "$ " + code_language: str = "console" + code_classes: tuple[str, ...] = ("highlight-console",) + usage_code_language: str = "cli-usage" + + # Behavior + reorder_usage_before_examples: bool = True + + @classmethod + def from_sphinx_config(cls, config: sphinx.config.Config) -> ExemplarConfig: + """Create ExemplarConfig from Sphinx configuration. + + Parameters + ---------- + config : sphinx.config.Config + The Sphinx configuration object. + + Returns + ------- + ExemplarConfig + Configuration populated from Sphinx config values. + + Examples + -------- + Create config from a Sphinx configuration object (typically called from + a directive's run() method): + + >>> from types import SimpleNamespace + >>> mock_config = SimpleNamespace() + >>> mock_config.argparse_examples_command_prefix = "> " + >>> mock_config.argparse_examples_code_language = "bash" + >>> config = ExemplarConfig.from_sphinx_config(mock_config) + >>> config.command_prefix + '> ' + >>> config.code_language + 'bash' + + Attributes not set on the config object use defaults: + + >>> config.examples_term_suffix + 'examples' + """ + # Get code_classes as tuple (Sphinx stores lists) + code_classes_raw = getattr( + config, "argparse_examples_code_classes", ("highlight-console",) + ) + code_classes = ( + tuple(code_classes_raw) + if isinstance(code_classes_raw, list) + else code_classes_raw + ) + + return cls( + examples_term_suffix=getattr( + config, "argparse_examples_term_suffix", "examples" + ), + examples_base_term=getattr( + config, "argparse_examples_base_term", "examples" + ), + examples_section_title=getattr( + config, "argparse_examples_section_title", "Examples" + ), + 
usage_pattern=getattr(config, "argparse_usage_pattern", "usage:"), + command_prefix=getattr(config, "argparse_examples_command_prefix", "$ "), + code_language=getattr(config, "argparse_examples_code_language", "console"), + code_classes=code_classes, + usage_code_language=getattr( + config, "argparse_usage_code_language", "cli-usage" + ), + reorder_usage_before_examples=getattr( + config, "argparse_reorder_usage_before_examples", True + ), + ) + + +# Re-export for backwards compatibility and public API +__all__ = [ + "CleanArgParseDirective", + "ExemplarConfig", + "is_base_examples_term", + "is_examples_term", + "make_section_id", + "make_section_title", + "process_node", + "strip_ansi", + "transform_definition_list", +] + + +def is_examples_term(term_text: str, *, config: ExemplarConfig | None = None) -> bool: + """Check if a definition term is an examples header. + + Parameters + ---------- + term_text : str + The text content of a definition term. + config : ExemplarConfig | None + Optional configuration. If None, uses default ExemplarConfig(). + + Returns + ------- + bool + True if this is an examples header. + + Examples + -------- + >>> is_examples_term("examples:") + True + >>> is_examples_term("Machine-readable output examples:") + True + >>> is_examples_term("Usage:") + False + + With custom configuration: + + >>> custom_config = ExemplarConfig(examples_term_suffix="demos") + >>> is_examples_term("demos:", config=custom_config) + True + >>> is_examples_term("examples:", config=custom_config) + False + """ + config = config or ExemplarConfig() + return term_text.lower().rstrip(":").endswith(config.examples_term_suffix) + + +def is_base_examples_term( + term_text: str, *, config: ExemplarConfig | None = None +) -> bool: + """Check if a definition term is a base "examples:" header (no prefix). + + Parameters + ---------- + term_text : str + The text content of a definition term. + config : ExemplarConfig | None + Optional configuration. 
If None, uses default ExemplarConfig(). + + Returns + ------- + bool + True if this is just "examples:" with no category prefix. + + Examples + -------- + >>> is_base_examples_term("examples:") + True + >>> is_base_examples_term("Examples") + True + >>> is_base_examples_term("Field-scoped examples:") + False + + With custom configuration: + + >>> custom_config = ExemplarConfig(examples_base_term="demos") + >>> is_base_examples_term("demos:", config=custom_config) + True + >>> is_base_examples_term("examples:", config=custom_config) + False + """ + config = config or ExemplarConfig() + return term_text.lower().rstrip(":").strip() == config.examples_base_term + + +def make_section_id( + term_text: str, + counter: int = 0, + *, + is_subsection: bool = False, + page_prefix: str = "", + config: ExemplarConfig | None = None, +) -> str: + """Generate a section ID from an examples term. + + Parameters + ---------- + term_text : str + The examples term text (e.g., "Machine-readable output: examples:") + counter : int + Counter for uniqueness if multiple examples sections exist. + is_subsection : bool + If True, omit "-examples" suffix for cleaner nested IDs. + page_prefix : str + Optional prefix from the page name (e.g., "sync", "add") to ensure + uniqueness across different documentation pages. + config : ExemplarConfig | None + Optional configuration. If None, uses default ExemplarConfig(). + + Returns + ------- + str + A normalized section ID. 
+ + Examples + -------- + >>> make_section_id("examples:") + 'examples' + >>> make_section_id("examples:", page_prefix="sync") + 'sync-examples' + >>> make_section_id("Machine-readable output examples:") + 'machine-readable-output-examples' + >>> make_section_id("Field-scoped examples:", is_subsection=True) + 'field-scoped' + >>> make_section_id("examples:", counter=1) + 'examples-1' + + With custom configuration: + + >>> custom_config = ExemplarConfig(examples_term_suffix="demos") + >>> make_section_id("demos:", config=custom_config) + 'demos' + >>> make_section_id("Machine-readable output demos:", config=custom_config) + 'machine-readable-output-demos' + """ + config = config or ExemplarConfig() + term_suffix = config.examples_term_suffix + + # Extract prefix before the term suffix (e.g., "Machine-readable output") + lower_text = term_text.lower().rstrip(":") + if term_suffix in lower_text: + prefix = lower_text.rsplit(term_suffix, 1)[0].strip() + # Remove trailing colon from prefix (handles ": examples" pattern) + prefix = prefix.rstrip(":").strip() + if prefix: + normalized_prefix = prefix.replace(" ", "-") + # Subsections don't need "-examples" suffix + if is_subsection: + section_id = normalized_prefix + else: + section_id = f"{normalized_prefix}-{term_suffix}" + else: + # Plain "examples" - add page prefix if provided for uniqueness + section_id = f"{page_prefix}-{term_suffix}" if page_prefix else term_suffix + else: + section_id = term_suffix + + # Add counter suffix for uniqueness + if counter > 0: + section_id = f"{section_id}-{counter}" + + return section_id + + +def make_section_title( + term_text: str, + *, + is_subsection: bool = False, + config: ExemplarConfig | None = None, +) -> str: + """Generate a section title from an examples term. + + Parameters + ---------- + term_text : str + The examples term text (e.g., "Machine-readable output: examples:") + is_subsection : bool + If True, omit "Examples" suffix for cleaner nested titles. 
+ config : ExemplarConfig | None + Optional configuration. If None, uses default ExemplarConfig(). + + Returns + ------- + str + A proper title (e.g., "Machine-readable Output Examples" or just + "Machine-Readable Output" if is_subsection=True). + + Examples + -------- + >>> make_section_title("examples:") + 'Examples' + >>> make_section_title("Machine-readable output examples:") + 'Machine-Readable Output Examples' + >>> make_section_title("Field-scoped examples:", is_subsection=True) + 'Field-Scoped' + + With custom configuration: + + >>> custom_config = ExemplarConfig( + ... examples_base_term="demos", + ... examples_term_suffix="demos", + ... examples_section_title="Demos", + ... ) + >>> make_section_title("demos:", config=custom_config) + 'Demos' + >>> make_section_title("Machine-readable output demos:", config=custom_config) + 'Machine-Readable Output Demos' + """ + config = config or ExemplarConfig() + base_term = config.examples_base_term + term_suffix = config.examples_term_suffix + section_title = config.examples_section_title + + # Remove trailing colon and normalize + text = term_text.rstrip(":").strip() + # Handle base term case (e.g., "examples:") + if text.lower() == base_term: + return section_title + + # Extract the prefix (category name) before the term suffix + lower = text.lower() + colon_suffix = f": {term_suffix}" + space_suffix = f" {term_suffix}" + if lower.endswith(colon_suffix): + prefix = text[: -len(colon_suffix)] + elif lower.endswith(space_suffix): + prefix = text[: -len(space_suffix)] + else: + prefix = text + + # Title case the prefix + titled_prefix = prefix.title() + + # For subsections, just use the prefix (cleaner nested titles) + if is_subsection: + return titled_prefix + + # For top-level sections, append the section title + return f"{titled_prefix} {section_title}" + + +def _create_example_section( + term_text: str, + def_node: nodes.definition, + *, + is_subsection: bool = False, + page_prefix: str = "", + config: 
ExemplarConfig | None = None, +) -> nodes.section: + """Create a section node for an examples item. + + Parameters + ---------- + term_text : str + The examples term text. + def_node : nodes.definition + The definition node containing example commands. + is_subsection : bool + If True, create a subsection with simpler title/id. + page_prefix : str + Optional prefix from the page name for unique section IDs. + config : ExemplarConfig | None + Optional configuration. If None, uses default ExemplarConfig(). + + Returns + ------- + nodes.section + A section node with title and code blocks. + + Examples + -------- + Create a section from a definition node containing example commands: + + >>> from docutils import nodes + >>> def_node = nodes.definition() + >>> def_node += nodes.paragraph(text="myapp sync") + >>> section = _create_example_section("examples:", def_node) + >>> section["ids"] + ['examples'] + >>> section[0].astext() + 'Examples' + + With a page prefix for uniqueness across documentation pages: + + >>> section = _create_example_section("examples:", def_node, page_prefix="sync") + >>> section["ids"] + ['sync-examples'] + + Category-prefixed examples create descriptive section IDs: + + >>> section = _create_example_section("Machine-readable output examples:", def_node) + >>> section["ids"] + ['machine-readable-output-examples'] + >>> section[0].astext() + 'Machine-Readable Output Examples' + """ + config = config or ExemplarConfig() + section_id = make_section_id( + term_text, is_subsection=is_subsection, page_prefix=page_prefix, config=config + ) + section_title = make_section_title( + term_text, is_subsection=is_subsection, config=config + ) + + section = nodes.section() + section["ids"] = [section_id] + section["names"] = [nodes.fully_normalize_name(section_title)] + + title = nodes.title(text=section_title) + section += title + + # Extract commands from definition and create separate code blocks + def_text = strip_ansi(def_node.astext()) + for line in 
def_text.split("\n"): + line = line.strip() + if line: + code_block = nodes.literal_block( + text=f"{config.command_prefix}{line}", + classes=list(config.code_classes), + ) + code_block["language"] = config.code_language + section += code_block + + return section + + +def transform_definition_list( + dl_node: nodes.definition_list, + *, + page_prefix: str = "", + config: ExemplarConfig | None = None, +) -> list[nodes.Node]: + """Transform a definition list, converting examples items to code blocks. + + If there's a base "examples:" item followed by category-specific examples + (e.g., "Field-scoped: examples:"), the categories are nested under the + parent Examples section for cleaner ToC structure. + + Parameters + ---------- + dl_node : nodes.definition_list + A definition list node. + page_prefix : str + Optional prefix from the page name for unique section IDs. + config : ExemplarConfig | None + Optional configuration. If None, uses default ExemplarConfig(). + + Returns + ------- + list[nodes.Node] + Transformed nodes - code blocks for examples, original for others. + + Note + ---- + **Intentional reordering behavior:** This function always emits non-example + items (preamble text, descriptions, etc.) before example sections, regardless + of their original position in the definition list. This "flush first" approach + groups conceptually related content: introductory material appears before + examples, even if the source document interleaves them. This produces cleaner + documentation structure where descriptions introduce their examples. + + If you need to preserve the original interleaved order, you would need to + modify this function to track item positions during the first pass. 
+ """ + config = config or ExemplarConfig() + + # First pass: collect examples and non-examples items separately + example_items: list[tuple[str, nodes.definition]] = [] # (term_text, def_node) + non_example_items: list[nodes.Node] = [] + base_examples_index: int | None = None + + for item in dl_node.children: + if not isinstance(item, nodes.definition_list_item): + continue + + # Get the term and definition + term_node = None + def_node = None + for child in item.children: + if isinstance(child, nodes.term): + term_node = child + elif isinstance(child, nodes.definition): + def_node = child + + if term_node is None or def_node is None: + non_example_items.append(item) + continue + + term_text = strip_ansi(term_node.astext()) + + if is_examples_term(term_text, config=config): + if is_base_examples_term(term_text, config=config): + base_examples_index = len(example_items) + example_items.append((term_text, def_node)) + else: + non_example_items.append(item) + + # Build result nodes + result_nodes: list[nodes.Node] = [] + + # Emit non-example items first (see docstring Note on reordering behavior) + if non_example_items: + new_dl = nodes.definition_list() + new_dl.extend(non_example_items) + result_nodes.append(new_dl) + + # Determine nesting strategy + # Nest if: there's a base "examples:" AND at least one other example category + should_nest = base_examples_index is not None and len(example_items) > 1 + + if should_nest and base_examples_index is not None: + # Create parent "Examples" section + base_term, base_def = example_items[base_examples_index] + parent_section = _create_example_section( + base_term, + base_def, + is_subsection=False, + page_prefix=page_prefix, + config=config, + ) + + # Add other examples as nested subsections + for i, (term_text, def_node) in enumerate(example_items): + if i == base_examples_index: + continue # Skip the base (already used as parent) + subsection = _create_example_section( + term_text, + def_node, + is_subsection=True, + 
page_prefix=page_prefix, + config=config, + ) + parent_section += subsection + + result_nodes.append(parent_section) + else: + # No nesting - create flat sections (backwards compatible) + for term_text, def_node in example_items: + section = _create_example_section( + term_text, + def_node, + is_subsection=False, + page_prefix=page_prefix, + config=config, + ) + result_nodes.append(section) + + return result_nodes + + +def process_node( + node: nodes.Node, + *, + page_prefix: str = "", + config: ExemplarConfig | None = None, +) -> nodes.Node | list[nodes.Node]: + """Process a node: strip ANSI codes and transform examples. + + Parameters + ---------- + node : nodes.Node + A docutils node to process. + page_prefix : str + Optional prefix from the page name for unique section IDs. + config : ExemplarConfig | None + Optional configuration. If None, uses default ExemplarConfig(). + + Returns + ------- + nodes.Node | list[nodes.Node] + The processed node(s). + """ + config = config or ExemplarConfig() + + # Handle text nodes - strip ANSI + if isinstance(node, nodes.Text): + cleaned = strip_ansi(node.astext()) + if cleaned != node.astext(): + return nodes.Text(cleaned) + return node + + # Handle definition lists - transform examples + if isinstance(node, nodes.definition_list): + # Check if any items are examples + has_examples = False + for item in node.children: + if isinstance(item, nodes.definition_list_item): + for child in item.children: + if isinstance(child, nodes.term) and is_examples_term( + strip_ansi(child.astext()), config=config + ): + has_examples = True + break + if has_examples: + break + + if has_examples: + return transform_definition_list( + node, page_prefix=page_prefix, config=config + ) + + # Handle literal_block nodes - strip ANSI and apply usage highlighting + if isinstance(node, nodes.literal_block): + text = strip_ansi(node.astext()) + needs_update = text != node.astext() + + # Check if this is a usage block (starts with configured pattern) + 
is_usage = text.lstrip().lower().startswith(config.usage_pattern.lower()) + + if needs_update or is_usage: + new_block = nodes.literal_block(text=text) + # Preserve attributes + for attr in ("language", "classes"): + if attr in node: + new_block[attr] = node[attr] + # Apply configured language to usage blocks + if is_usage: + new_block["language"] = config.usage_code_language + return new_block + return node + + # Handle paragraph nodes - strip ANSI and lift sections out + if isinstance(node, nodes.paragraph): + # Process children and check if any become sections + processed_children: list[nodes.Node] = [] + changed = False + has_sections = False + + for child in node.children: + if isinstance(child, nodes.Text): + cleaned = strip_ansi(child.astext()) + if cleaned != child.astext(): + processed_children.append(nodes.Text(cleaned)) + changed = True + else: + processed_children.append(child) + else: + result = process_node(child, page_prefix=page_prefix, config=config) + if isinstance(result, list): + processed_children.extend(result) + changed = True + # Check if any results are sections + if any(isinstance(r, nodes.section) for r in result): + has_sections = True + elif result is not child: + processed_children.append(result) + changed = True + if isinstance(result, nodes.section): + has_sections = True + else: + processed_children.append(child) + + if not changed: + return node + + # If no sections, return a normal paragraph + if not has_sections: + new_para = nodes.paragraph() + new_para.extend(processed_children) + return new_para + + # Sections found - lift them out of the paragraph + # Return a list: [para_before, section1, section2, ..., para_after] + result_nodes: list[nodes.Node] = [] + current_para_children: list[nodes.Node] = [] + + for child in processed_children: + if isinstance(child, nodes.section): + # Flush current paragraph content + if current_para_children: + para = nodes.paragraph() + para.extend(current_para_children) + 
result_nodes.append(para) + current_para_children = [] + # Add section as a sibling + result_nodes.append(child) + else: + current_para_children.append(child) + + # Flush remaining paragraph content + if current_para_children: + para = nodes.paragraph() + para.extend(current_para_children) + result_nodes.append(para) + + return result_nodes + + # Recursively process children for other node types + if hasattr(node, "children"): + new_children: list[nodes.Node] = [] + children_changed = False + for child in node.children: + result = process_node(child, page_prefix=page_prefix, config=config) + if isinstance(result, list): + new_children.extend(result) + children_changed = True + elif result is not child: + new_children.append(result) + children_changed = True + else: + new_children.append(child) + if children_changed: + node[:] = new_children # type: ignore[index] + + return node + + +def _is_usage_block(node: nodes.Node, *, config: ExemplarConfig | None = None) -> bool: + """Check if a node is a usage literal block. + + Parameters + ---------- + node : nodes.Node + A docutils node to check. + config : ExemplarConfig | None + Optional configuration. If None, uses default ExemplarConfig(). + + Returns + ------- + bool + True if this is a usage block (literal_block starting with usage pattern). 
+ + Examples + -------- + >>> from docutils import nodes + >>> _is_usage_block(nodes.literal_block(text="usage: cmd [-h]")) + True + >>> _is_usage_block(nodes.literal_block(text="Usage: myapp sync")) + True + >>> _is_usage_block(nodes.literal_block(text=" usage: cmd")) + True + >>> _is_usage_block(nodes.literal_block(text="some other text")) + False + >>> _is_usage_block(nodes.paragraph(text="usage: cmd")) + False + >>> _is_usage_block(nodes.section()) + False + + With custom configuration: + + >>> custom_config = ExemplarConfig(usage_pattern="synopsis:") + >>> _is_usage_block(nodes.literal_block(text="synopsis: cmd"), config=custom_config) + True + >>> _is_usage_block(nodes.literal_block(text="usage: cmd"), config=custom_config) + False + """ + config = config or ExemplarConfig() + if not isinstance(node, nodes.literal_block): + return False + text = node.astext() + return text.lstrip().lower().startswith(config.usage_pattern.lower()) + + +def _is_usage_section(node: nodes.Node) -> bool: + """Check if a node is a usage section. + + Parameters + ---------- + node : nodes.Node + A docutils node to check. + + Returns + ------- + bool + True if this is a section with "usage" in its ID. + + Examples + -------- + >>> from docutils import nodes + >>> section = nodes.section() + >>> section["ids"] = ["usage"] + >>> _is_usage_section(section) + True + >>> section2 = nodes.section() + >>> section2["ids"] = ["sync-usage"] + >>> _is_usage_section(section2) + True + >>> section3 = nodes.section() + >>> section3["ids"] = ["options"] + >>> _is_usage_section(section3) + False + >>> _is_usage_section(nodes.paragraph()) + False + """ + if not isinstance(node, nodes.section): + return False + ids: list[str] = node.get("ids", []) + return any(id_str == "usage" or id_str.endswith("-usage") for id_str in ids) + + +def _is_examples_section( + node: nodes.Node, *, config: ExemplarConfig | None = None +) -> bool: + """Check if a node is an examples section. 
+ + Parameters + ---------- + node : nodes.Node + A docutils node to check. + config : ExemplarConfig | None + Optional configuration. If None, uses default ExemplarConfig(). + + Returns + ------- + bool + True if this is an examples section (section with term suffix in its ID). + + Examples + -------- + >>> from docutils import nodes + >>> section = nodes.section() + >>> section["ids"] = ["examples"] + >>> _is_examples_section(section) + True + >>> section2 = nodes.section() + >>> section2["ids"] = ["machine-readable-output-examples"] + >>> _is_examples_section(section2) + True + >>> section3 = nodes.section() + >>> section3["ids"] = ["positional-arguments"] + >>> _is_examples_section(section3) + False + >>> _is_examples_section(nodes.paragraph()) + False + >>> _is_examples_section(nodes.literal_block(text="examples")) + False + + With custom configuration: + + >>> custom_config = ExemplarConfig(examples_term_suffix="demos") + >>> section = nodes.section() + >>> section["ids"] = ["demos"] + >>> _is_examples_section(section, config=custom_config) + True + >>> section2 = nodes.section() + >>> section2["ids"] = ["examples"] + >>> _is_examples_section(section2, config=custom_config) + False + """ + config = config or ExemplarConfig() + if not isinstance(node, nodes.section): + return False + ids: list[str] = node.get("ids", []) + return any(config.examples_term_suffix in id_str.lower() for id_str in ids) + + +def _reorder_nodes( + processed: list[nodes.Node], *, config: ExemplarConfig | None = None +) -> list[nodes.Node]: + """Reorder nodes so usage sections/blocks appear before examples sections. + + This ensures the CLI usage synopsis appears above examples in the + documentation, making it easier to understand command syntax before + seeing example invocations. 
+ + The function handles both: + - Usage as literal_block (legacy format from older renderer) + - Usage as section#usage (new format with TOC support) + + Parameters + ---------- + processed : list[nodes.Node] + List of processed docutils nodes. + config : ExemplarConfig | None + Optional configuration. If None, uses default ExemplarConfig(). + + Returns + ------- + list[nodes.Node] + Reordered nodes with usage before examples (if enabled). + + Examples + -------- + >>> from docutils import nodes + + Create test nodes: + + >>> desc = nodes.paragraph(text="Description") + >>> examples = nodes.section() + >>> examples["ids"] = ["examples"] + >>> usage = nodes.literal_block(text="usage: cmd [-h]") + >>> args = nodes.section() + >>> args["ids"] = ["arguments"] + + When usage appears after examples, it gets moved before: + + >>> result = _reorder_nodes([desc, examples, usage, args]) + >>> [type(n).__name__ for n in result] + ['paragraph', 'literal_block', 'section', 'section'] + + When no examples exist, order is unchanged: + + >>> result = _reorder_nodes([desc, usage, args]) + >>> [type(n).__name__ for n in result] + ['paragraph', 'literal_block', 'section'] + + When usage already before examples, order is preserved: + + >>> result = _reorder_nodes([desc, usage, examples, args]) + >>> [type(n).__name__ for n in result] + ['paragraph', 'literal_block', 'section', 'section'] + + Empty list returns empty: + + >>> _reorder_nodes([]) + [] + + Usage sections (with TOC heading) are also handled: + + >>> usage_section = nodes.section() + >>> usage_section["ids"] = ["usage"] + >>> result = _reorder_nodes([desc, examples, usage_section, args]) + >>> [n.get("ids", []) for n in result if isinstance(n, nodes.section)] + [['usage'], ['examples'], ['arguments']] + + Reordering can be disabled via config: + + >>> no_reorder_config = ExemplarConfig(reorder_usage_before_examples=False) + >>> result = _reorder_nodes([desc, examples, usage, args], config=no_reorder_config) + >>> 
[type(n).__name__ for n in result] + ['paragraph', 'section', 'literal_block', 'section'] + """ + config = config or ExemplarConfig() + + # If reordering is disabled, return as-is + if not config.reorder_usage_before_examples: + return processed + + # First pass: check if there are any examples sections + has_examples = any(_is_examples_section(node, config=config) for node in processed) + if not has_examples: + # No examples, preserve original order + return processed + + usage_nodes: list[nodes.Node] = [] + examples_sections: list[nodes.Node] = [] + other_before_examples: list[nodes.Node] = [] + other_after_examples: list[nodes.Node] = [] + + seen_examples = False + for node in processed: + # Check for both usage block (literal_block) and usage section + if _is_usage_block(node, config=config) or _is_usage_section(node): + usage_nodes.append(node) + elif _is_examples_section(node, config=config): + examples_sections.append(node) + seen_examples = True + elif not seen_examples: + other_before_examples.append(node) + else: + other_after_examples.append(node) + + # Order: before_examples β†’ usage β†’ examples β†’ after_examples + return ( + other_before_examples + usage_nodes + examples_sections + other_after_examples + ) + + +def _extract_sections_from_container( + container: nodes.Node, +) -> tuple[nodes.Node, list[nodes.section]]: + """Extract section nodes from a container, returning modified container. + + This function finds any section nodes that are children of the container + (typically argparse_program), removes them from the container, and returns + them separately so they can be made siblings. + + This is needed because Sphinx's TocTreeCollector only discovers sections + that are direct children of the document or properly nested in the section + hierarchy - sections inside arbitrary div containers are invisible to TOC. + + Parameters + ---------- + container : nodes.Node + A container node (typically argparse_program) that may contain sections. 
+ + Returns + ------- + tuple[nodes.Node, list[nodes.section]] + A tuple of (modified_container, extracted_sections). + + Examples + -------- + >>> from docutils import nodes + >>> from sphinx_argparse_neo.nodes import argparse_program + >>> container = argparse_program() + >>> para = nodes.paragraph(text="Description") + >>> examples = nodes.section() + >>> examples["ids"] = ["examples"] + >>> container += para + >>> container += examples + >>> modified, extracted = _extract_sections_from_container(container) + >>> len(modified.children) + 1 + >>> len(extracted) + 1 + >>> extracted[0]["ids"] + ['examples'] + """ + if not hasattr(container, "children"): + return container, [] + + extracted_sections: list[nodes.section] = [] + remaining_children: list[nodes.Node] = [] + + for child in container.children: + if isinstance(child, nodes.section): + extracted_sections.append(child) + else: + remaining_children.append(child) + + # Update container with remaining children only + container[:] = remaining_children # type: ignore[index] + + return container, extracted_sections + + +class CleanArgParseDirective(ArgparseDirective): + """ArgParse directive that strips ANSI codes and formats examples.""" + + def run(self) -> list[nodes.Node]: + """Run the directive, clean output, format examples, and reorder. + + The processing pipeline: + 1. Run base directive to get initial nodes + 2. Load configuration from Sphinx config + 3. Process each node (strip ANSI, transform examples definition lists) + 4. Extract sections from inside argparse_program containers + 5. 
Reorder so usage appears before examples (if enabled) + """ + result = super().run() + + # Load configuration from Sphinx + config = ExemplarConfig.from_sphinx_config(self.env.config) + + # Extract page name for unique section IDs across different CLI pages + page_prefix = "" + if hasattr(self.state, "document"): + settings = self.state.document.settings + if hasattr(settings, "env") and hasattr(settings.env, "docname"): + # docname is like "cli/sync" - extract "sync" + docname = settings.env.docname + page_prefix = docname.split("/")[-1] + + processed: list[nodes.Node] = [] + for node in result: + processed_node = process_node(node, page_prefix=page_prefix, config=config) + if isinstance(processed_node, list): + processed.extend(processed_node) + else: + processed.append(processed_node) + + # Extract sections from inside argparse_program containers + # This is needed because sections inside divs are invisible to Sphinx TOC + flattened: list[nodes.Node] = [] + for node in processed: + # Check if this is an argparse_program (or similar container) + # that might have sections inside + node_class_name = type(node).__name__ + if node_class_name == "argparse_program": + modified, extracted = _extract_sections_from_container(node) + flattened.append(modified) + flattened.extend(extracted) + else: + flattened.append(node) + + # Reorder: usage sections/blocks before examples sections + return _reorder_nodes(flattened, config=config) + + +def setup(app: Sphinx) -> dict[str, t.Any]: + """Register the clean argparse directive, lexers, and CLI roles. + + Configuration Options + --------------------- + The following configuration options can be set in conf.py: + + ``argparse_examples_term_suffix`` : str (default: "examples") + Term must end with this string to be treated as examples header. + + ``argparse_examples_base_term`` : str (default: "examples") + Exact match for the base examples section. 
+ + ``argparse_examples_section_title`` : str (default: "Examples") + Title used for the base examples section. + + ``argparse_usage_pattern`` : str (default: "usage:") + Text must start with this to be treated as a usage block. + + ``argparse_examples_command_prefix`` : str (default: "$ ") + Prefix added to each command line in examples code blocks. + + ``argparse_examples_code_language`` : str (default: "console") + Language identifier for examples code blocks. + + ``argparse_examples_code_classes`` : list[str] (default: ["highlight-console"]) + CSS classes added to examples code blocks. + + ``argparse_usage_code_language`` : str (default: "cli-usage") + Language identifier for usage blocks. + + ``argparse_reorder_usage_before_examples`` : bool (default: True) + Whether to reorder nodes so usage appears before examples. + + Parameters + ---------- + app : Sphinx + The Sphinx application object. + + Returns + ------- + dict + Extension metadata. + """ + # Load the base sphinx_argparse_neo extension first + app.setup_extension("sphinx_argparse_neo") + + # Register configuration options + app.add_config_value("argparse_examples_term_suffix", "examples", "html") + app.add_config_value("argparse_examples_base_term", "examples", "html") + app.add_config_value("argparse_examples_section_title", "Examples", "html") + app.add_config_value("argparse_usage_pattern", "usage:", "html") + app.add_config_value("argparse_examples_command_prefix", "$ ", "html") + app.add_config_value("argparse_examples_code_language", "console", "html") + app.add_config_value( + "argparse_examples_code_classes", ["highlight-console"], "html" + ) + app.add_config_value("argparse_usage_code_language", "cli-usage", "html") + app.add_config_value("argparse_reorder_usage_before_examples", True, "html") + + # Override the argparse directive with our enhanced version + app.add_directive("argparse", CleanArgParseDirective, override=True) + + # Register CLI usage lexer for usage block highlighting + from 
cli_usage_lexer import CLIUsageLexer + + app.add_lexer("cli-usage", CLIUsageLexer) + + # Register argparse lexers for help output highlighting + from argparse_lexer import ( + ArgparseHelpLexer, + ArgparseLexer, + ArgparseUsageLexer, + ) + + app.add_lexer("argparse", ArgparseLexer) + app.add_lexer("argparse-usage", ArgparseUsageLexer) + app.add_lexer("argparse-help", ArgparseHelpLexer) + + # Register CLI inline roles for documentation + from argparse_roles import register_roles + + register_roles() + + return {"version": "4.0", "parallel_read_safe": True} diff --git a/docs/_ext/argparse_lexer.py b/docs/_ext/argparse_lexer.py new file mode 100644 index 0000000..28f8689 --- /dev/null +++ b/docs/_ext/argparse_lexer.py @@ -0,0 +1,429 @@ +"""Pygments lexers for argparse help output. + +This module provides custom Pygments lexers for highlighting argparse-generated +command-line help text, including usage lines, section headers, and full help output. + +Three lexer classes are provided: +- ArgparseUsageLexer: For usage lines only +- ArgparseHelpLexer: For full -h output (delegates usage to ArgparseUsageLexer) +- ArgparseLexer: Smart auto-detecting wrapper +""" + +from __future__ import annotations + +from pygments.lexer import RegexLexer, bygroups, include +from pygments.token import Generic, Name, Operator, Punctuation, Text, Whitespace + + +class ArgparseUsageLexer(RegexLexer): + """Lexer for argparse usage lines only. 
+ + Handles patterns like: + - usage: PROG [-h] [--foo FOO] bar {a,b,c} + - Mutually exclusive: [-a | -b], (--foo | --bar) + - Choices: {json,yaml,table} + - Variadic: FILE ..., [FILE ...], [--foo [FOO]] + + Examples + -------- + >>> from pygments.token import Token + >>> lexer = ArgparseUsageLexer() + >>> tokens = list(lexer.get_tokens("usage: cmd [-h]")) + >>> tokens[0] + (Token.Generic.Heading, 'usage:') + >>> tokens[2] + (Token.Name.Label, 'cmd') + """ + + name = "Argparse Usage" + aliases = ["argparse-usage"] # noqa: RUF012 + filenames: list[str] = [] # noqa: RUF012 + mimetypes = ["text/x-argparse-usage"] # noqa: RUF012 + + tokens = { # noqa: RUF012 + "root": [ + # "usage:" at start of line - then look for program name + ( + r"^(usage:)(\s+)", + bygroups(Generic.Heading, Whitespace), # type: ignore[no-untyped-call] + "after_usage", + ), + # Continuation lines (leading whitespace for wrapped usage) + (r"^(\s+)(?=\S)", Whitespace), + include("inline"), + ], + "after_usage": [ + # Whitespace + (r"\s+", Whitespace), + # Program name (first lowercase word after usage:) + (r"\b[a-z][-a-z0-9]*\b", Name.Label, "usage_body"), + # Fallback to inline if something unexpected + include("inline"), + ], + "usage_body": [ + # Whitespace + (r"\s+", Whitespace), + # Ellipsis for variadic args (before other patterns) + (r"\.\.\.", Punctuation), + # Long options with = value (e.g., --log-level=VALUE) + ( + r"(--[a-zA-Z0-9][-a-zA-Z0-9]*)(=)([A-Z][A-Z0-9_]*|[a-z][-a-z0-9_]*)", + bygroups(Name.Tag, Operator, Name.Variable), # type: ignore[no-untyped-call] + ), + # Long options standalone + (r"--[a-zA-Z0-9][-a-zA-Z0-9]*", Name.Tag), + # Short options with space-separated value (e.g., -S socket-path) + ( + r"(-[a-zA-Z0-9])(\s+)([A-Z][A-Z0-9_]*|[a-z][-a-z0-9_]*)", + bygroups(Name.Attribute, Whitespace, Name.Variable), # type: ignore[no-untyped-call] + ), + # Short options standalone + (r"-[a-zA-Z0-9]", Name.Attribute), + # Opening brace - enter choices state + (r"\{", Punctuation, 
"choices"), + # Opening bracket - enter optional state + (r"\[", Punctuation, "optional"), + # Closing bracket (fallback for unmatched) + (r"\]", Punctuation), + # Opening paren - enter required mutex state + (r"\(", Punctuation, "required"), + # Closing paren (fallback for unmatched) + (r"\)", Punctuation), + # Choice separator (pipe) for mutex groups + (r"\|", Operator), + # UPPERCASE meta-variables (COMMAND, FILE, PATH) + (r"\b[A-Z][A-Z0-9_]*\b", Name.Variable), + # Subcommand/positional names (Name.Function for distinct styling) + (r"\b[a-z][-a-z0-9_]*\b", Name.Function), + # Catch-all for any other text + (r"[^\s\[\]|(){},]+", Text), + ], + "inline": [ + # Whitespace + (r"\s+", Whitespace), + # Ellipsis for variadic args (before other patterns) + (r"\.\.\.", Punctuation), + # Long options with = value (e.g., --log-level=VALUE) + ( + r"(--[a-zA-Z0-9][-a-zA-Z0-9]*)(=)([A-Z][A-Z0-9_]*|[a-z][-a-z0-9_]*)", + bygroups(Name.Tag, Operator, Name.Variable), # type: ignore[no-untyped-call] + ), + # Long options standalone + (r"--[a-zA-Z0-9][-a-zA-Z0-9]*", Name.Tag), + # Short options with space-separated value (e.g., -S socket-path) + ( + r"(-[a-zA-Z0-9])(\s+)([A-Z][A-Z0-9_]*|[a-z][-a-z0-9_]*)", + bygroups(Name.Attribute, Whitespace, Name.Variable), # type: ignore[no-untyped-call] + ), + # Short options standalone + (r"-[a-zA-Z0-9]", Name.Attribute), + # Opening brace - enter choices state + (r"\{", Punctuation, "choices"), + # Opening bracket - enter optional state + (r"\[", Punctuation, "optional"), + # Closing bracket (fallback for unmatched) + (r"\]", Punctuation), + # Opening paren - enter required mutex state + (r"\(", Punctuation, "required"), + # Closing paren (fallback for unmatched) + (r"\)", Punctuation), + # Choice separator (pipe) for mutex groups + (r"\|", Operator), + # UPPERCASE meta-variables (COMMAND, FILE, PATH) + (r"\b[A-Z][A-Z0-9_]*\b", Name.Variable), + # Positional/command names (lowercase with dashes) + (r"\b[a-z][-a-z0-9_]*\b", Name.Label), + # 
Catch-all for any other text + (r"[^\s\[\]|(){},]+", Text), + ], + "optional": [ + # Nested optional bracket + (r"\[", Punctuation, "#push"), + # End optional + (r"\]", Punctuation, "#pop"), + # Contents use usage_body rules (subcommands are green) + include("usage_body"), + ], + "required": [ + # Nested required paren + (r"\(", Punctuation, "#push"), + # End required + (r"\)", Punctuation, "#pop"), + # Contents use usage_body rules (subcommands are green) + include("usage_body"), + ], + "choices": [ + # Choice values (comma-separated inside braces) + (r"[a-zA-Z0-9][-a-zA-Z0-9_]*", Name.Constant), + # Comma separator + (r",", Punctuation), + # End choices + (r"\}", Punctuation, "#pop"), + # Whitespace + (r"\s+", Whitespace), + ], + } + + +class ArgparseHelpLexer(RegexLexer): + """Lexer for full argparse -h help output. + + Handles: + - Usage lines (delegates to ArgparseUsageLexer patterns) + - Section headers (positional arguments:, options:, etc.) + - Option entries with help text + - Indented descriptions + + Examples + -------- + >>> from pygments.token import Token + >>> lexer = ArgparseHelpLexer() + >>> tokens = list(lexer.get_tokens("positional arguments:")) + >>> any(t[0] == Token.Generic.Subheading for t in tokens) + True + >>> tokens = list(lexer.get_tokens(" -h, --help show help")) + >>> any(t[0] == Token.Name.Attribute for t in tokens) + True + """ + + name = "Argparse Help" + aliases = ["argparse-help"] # noqa: RUF012 + filenames: list[str] = [] # noqa: RUF012 + mimetypes = ["text/x-argparse-help"] # noqa: RUF012 + + tokens = { # noqa: RUF012 + "root": [ + # "usage:" line - switch to after_usage to find program name + ( + r"^(usage:)(\s+)", + bygroups(Generic.Heading, Whitespace), # type: ignore[no-untyped-call] + "after_usage", + ), + # Section headers (e.g., "positional arguments:", "options:") + (r"^([a-zA-Z][-a-zA-Z0-9_ ]*:)\s*$", Generic.Subheading), + # Option entry lines (indented with spaces/tabs, not just newlines) + (r"^([ \t]+)", Whitespace, 
"option_line"), + # Continuation of usage (leading spaces/tabs followed by content) + (r"^([ \t]+)(?=\S)", Whitespace), + # Anything else (must match at least one char to avoid infinite loop) + (r".+\n?", Text), + # Standalone newlines + (r"\n", Whitespace), + ], + "after_usage": [ + # Whitespace + (r"\s+", Whitespace), + # Program name (first lowercase word after usage:) + (r"\b[a-z][-a-z0-9]*\b", Name.Label, "usage"), + # Fallback to usage if something unexpected + include("usage_inline"), + ], + "usage": [ + # End of usage on blank line or section header + (r"\n(?=[a-zA-Z][-a-zA-Z0-9_ ]*:\s*$)", Text, "#pop:2"), + (r"\n(?=\n)", Text, "#pop:2"), + # Usage content - use usage_inline rules (subcommands are green) + include("usage_inline"), + # Line continuation + (r"\n", Text), + ], + "usage_inline": [ + # Whitespace + (r"\s+", Whitespace), + # Ellipsis for variadic args + (r"\.\.\.", Punctuation), + # Long options with = value + ( + r"(--[a-zA-Z0-9][-a-zA-Z0-9]*)(=)([A-Z][A-Z0-9_]*|[a-z][-a-z0-9_]*)", + bygroups(Name.Tag, Operator, Name.Variable), # type: ignore[no-untyped-call] + ), + # Long options standalone + (r"--[a-zA-Z0-9][-a-zA-Z0-9]*", Name.Tag), + # Short options with value + ( + r"(-[a-zA-Z0-9])(\s+)([A-Z][A-Z0-9_]*|[a-z][-a-z0-9_]*)", + bygroups(Name.Attribute, Whitespace, Name.Variable), # type: ignore[no-untyped-call] + ), + # Short options standalone + (r"-[a-zA-Z0-9]", Name.Attribute), + # Choices in braces + (r"\{", Punctuation, "choices"), + # Optional brackets + (r"\[", Punctuation, "optional"), + (r"\]", Punctuation), + # Required parens (mutex) + (r"\(", Punctuation, "required"), + (r"\)", Punctuation), + # Pipe for mutex + (r"\|", Operator), + # UPPERCASE metavars + (r"\b[A-Z][A-Z0-9_]*\b", Name.Variable), + # Subcommand/positional names (Name.Function for distinct styling) + (r"\b[a-z][-a-z0-9_]*\b", Name.Function), + # Other text + (r"[^\s\[\]|(){},\n]+", Text), + ], + "option_line": [ + # Short option with comma (e.g., "-h, --help") + ( + 
r"(-[a-zA-Z0-9])(,)(\s*)(--[a-zA-Z0-9][-a-zA-Z0-9]*)", + bygroups(Name.Attribute, Punctuation, Whitespace, Name.Tag), # type: ignore[no-untyped-call] + ), + # Long options with = value + ( + r"(--[a-zA-Z0-9][-a-zA-Z0-9]*)(=)([A-Z][A-Z0-9_]*|[a-z][-a-z0-9]*)", + bygroups(Name.Tag, Operator, Name.Variable), # type: ignore[no-untyped-call] + ), + # Long options with space-separated metavar + ( + r"(--[a-zA-Z0-9][-a-zA-Z0-9]*)(\s+)([A-Z][A-Z0-9_]+)", + bygroups(Name.Tag, Whitespace, Name.Variable), # type: ignore[no-untyped-call] + ), + # Long options standalone + (r"--[a-zA-Z0-9][-a-zA-Z0-9]*", Name.Tag), + # Short options with metavar + ( + r"(-[a-zA-Z0-9])(\s+)([A-Z][A-Z0-9_]+)", + bygroups(Name.Attribute, Whitespace, Name.Variable), # type: ignore[no-untyped-call] + ), + # Short options standalone + (r"-[a-zA-Z0-9]", Name.Attribute), + # Choices in braces + (r"\{", Punctuation, "option_choices"), + # Help text (everything after double space or large gap) + (r"([ \t]{2,})(.+)$", bygroups(Whitespace, Text)), # type: ignore[no-untyped-call] + # End of line - MUST come before \s+ to properly pop on newlines + (r"\n", Text, "#pop"), + # Other whitespace (spaces/tabs only, not newlines) + (r"[ \t]+", Whitespace), + # UPPERCASE metavars + (r"\b[A-Z][A-Z0-9_]*\b", Name.Variable), + # Anything else on the line + (r"[^\s\n]+", Text), + ], + "optional": [ + (r"\[", Punctuation, "#push"), + (r"\]", Punctuation, "#pop"), + include("usage_inline"), + ], + "required": [ + (r"\(", Punctuation, "#push"), + (r"\)", Punctuation, "#pop"), + include("usage_inline"), + ], + "choices": [ + (r"[a-zA-Z0-9][-a-zA-Z0-9_]*", Name.Constant), + (r",", Punctuation), + (r"\}", Punctuation, "#pop"), + (r"\s+", Whitespace), + ], + "option_choices": [ + (r"[a-zA-Z0-9][-a-zA-Z0-9_]*", Name.Constant), + (r",", Punctuation), + (r"\}", Punctuation, "#pop"), + (r"\s+", Whitespace), + ], + } + + +class ArgparseLexer(ArgparseHelpLexer): + """Smart auto-detecting lexer for argparse output. 
+ + Inherits from ArgparseHelpLexer to properly handle Pygments' metaclass + token processing. Using inheritance (not token dict copying) avoids + shared mutable state that causes memory corruption. + + This is the recommended lexer for general argparse highlighting. + + Examples + -------- + >>> from pygments.token import Token + >>> lexer = ArgparseLexer() + + Usage line detection: + + >>> tokens = list(lexer.get_tokens("usage: cmd [-h]")) + >>> tokens[0] + (Token.Generic.Heading, 'usage:') + + Section header detection (Pygments appends newline to input): + + >>> tokens = list(lexer.get_tokens("positional arguments:")) + >>> any(t[0] == Token.Generic.Subheading for t in tokens) + True + + Option highlighting in option line context: + + >>> tokens = list(lexer.get_tokens(" -h, --help show help")) + >>> any(t[0] == Token.Name.Attribute for t in tokens) + True + """ + + name = "Argparse" + aliases = ["argparse"] # noqa: RUF012 + filenames: list[str] = [] # noqa: RUF012 + mimetypes = ["text/x-argparse"] # noqa: RUF012 + + # Tokens inherited from ArgparseHelpLexer - do NOT redefine or copy + + +def tokenize_argparse(text: str) -> list[tuple[str, str]]: + """Tokenize argparse text and return list of (token_type, value) tuples. + + Parameters + ---------- + text : str + Argparse help or usage text to tokenize. + + Returns + ------- + list[tuple[str, str]] + List of (token_type_name, text_value) tuples. + + Examples + -------- + >>> result = tokenize_argparse("usage: cmd [-h]") + >>> result[0] + ('Token.Generic.Heading', 'usage:') + >>> result[2] + ('Token.Name.Label', 'cmd') + + >>> result = tokenize_argparse("positional arguments:") + >>> any('Token.Generic.Subheading' in t[0] for t in result) + True + """ + lexer = ArgparseLexer() + return [ + (str(tok_type), tok_value) for tok_type, tok_value in lexer.get_tokens(text) + ] + + +def tokenize_usage(text: str) -> list[tuple[str, str]]: + """Tokenize usage text and return list of (token_type, value) tuples. 
+ + Parameters + ---------- + text : str + CLI usage text to tokenize. + + Returns + ------- + list[tuple[str, str]] + List of (token_type_name, text_value) tuples. + + Examples + -------- + >>> result = tokenize_usage("usage: cmd [-h]") + >>> result[0] + ('Token.Generic.Heading', 'usage:') + >>> result[2] + ('Token.Name.Label', 'cmd') + >>> result[4] + ('Token.Punctuation', '[') + >>> result[5] + ('Token.Name.Attribute', '-h') + """ + lexer = ArgparseUsageLexer() + return [ + (str(tok_type), tok_value) for tok_type, tok_value in lexer.get_tokens(text) + ] diff --git a/docs/_ext/argparse_roles.py b/docs/_ext/argparse_roles.py new file mode 100644 index 0000000..86e5459 --- /dev/null +++ b/docs/_ext/argparse_roles.py @@ -0,0 +1,370 @@ +"""Docutils inline roles for CLI/argparse highlighting. + +This module provides custom docutils roles for inline highlighting of CLI +elements in reStructuredText and MyST documentation. + +Available roles: +- :cli-option: - CLI options (--verbose, -h) +- :cli-metavar: - Metavar placeholders (FILE, PATH) +- :cli-command: - Command names (sync, add) +- :cli-default: - Default values (None, "default") +- :cli-choice: - Choice values (json, yaml) +""" + +from __future__ import annotations + +import typing as t + +from docutils import nodes +from docutils.parsers.rst import roles + +if t.TYPE_CHECKING: + from docutils.parsers.rst.states import Inliner + + +def normalize_options(options: dict[str, t.Any] | None) -> dict[str, t.Any]: + """Normalize role options, converting None to empty dict. + + Parameters + ---------- + options : dict | None + Options passed to the role. + + Returns + ------- + dict + Normalized options dict (never None). 
+ + Examples + -------- + >>> normalize_options(None) + {} + >>> normalize_options({"class": "custom"}) + {'class': 'custom'} + """ + return options if options is not None else {} + + +def cli_option_role( + name: str, + rawtext: str, + text: str, + lineno: int, + inliner: Inliner | None, + options: dict[str, t.Any] | None = None, + content: list[str] | None = None, +) -> tuple[list[nodes.Node], list[nodes.system_message]]: + """Role for CLI options like --foo or -h. + + Generates a literal node with appropriate CSS classes for styling. + Long options (--foo) get 'cli-option-long', short options (-h) get + 'cli-option-short'. + + Parameters + ---------- + name : str + Local name of the role used in document. + rawtext : str + Full interpreted text including role markup. + text : str + Content between backticks. + lineno : int + Line number. + inliner : Inliner | None + Object that called the role (has .reporter, .document). + options : dict | None + Options from role directive. + content : list | None + Content from role directive. + + Returns + ------- + tuple[list[nodes.Node], list[nodes.system_message]] + Nodes to insert and any messages. + + Examples + -------- + >>> node_list, messages = cli_option_role( + ... "cli-option", ":cli-option:`--verbose`", "--verbose", + ... 1, None + ... ) + >>> node_list[0]["classes"] + ['cli-option', 'cli-option-long'] + + >>> node_list, messages = cli_option_role( + ... "cli-option", ":cli-option:`-h`", "-h", + ... 1, None + ... ) + >>> node_list[0]["classes"] + ['cli-option', 'cli-option-short'] + + >>> node_list, messages = cli_option_role( + ... "cli-option", ":cli-option:`--no-color`", "--no-color", + ... 1, None + ... 
) + >>> node_list[0].astext() + '--no-color' + """ + options = normalize_options(options) + node = nodes.literal(rawtext, text, classes=["cli-option"]) + + if text.startswith("--"): + node["classes"].append("cli-option-long") + elif text.startswith("-"): + node["classes"].append("cli-option-short") + + return [node], [] + + +def cli_metavar_role( + name: str, + rawtext: str, + text: str, + lineno: int, + inliner: Inliner | None, + options: dict[str, t.Any] | None = None, + content: list[str] | None = None, +) -> tuple[list[nodes.Node], list[nodes.system_message]]: + """Role for CLI metavar placeholders like FILE or PATH. + + Generates a literal node with 'cli-metavar' CSS class for styling. + + Parameters + ---------- + name : str + Local name of the role used in document. + rawtext : str + Full interpreted text including role markup. + text : str + Content between backticks. + lineno : int + Line number. + inliner : Inliner | None + Object that called the role. + options : dict | None + Options from role directive. + content : list | None + Content from role directive. + + Returns + ------- + tuple[list[nodes.Node], list[nodes.system_message]] + Nodes to insert and any messages. + + Examples + -------- + >>> node_list, messages = cli_metavar_role( + ... "cli-metavar", ":cli-metavar:`FILE`", "FILE", + ... 1, None + ... ) + >>> node_list[0]["classes"] + ['cli-metavar'] + >>> node_list[0].astext() + 'FILE' + + >>> node_list, messages = cli_metavar_role( + ... "cli-metavar", ":cli-metavar:`PATH`", "PATH", + ... 1, None + ... 
) + >>> "cli-metavar" in node_list[0]["classes"] + True + """ + options = normalize_options(options) + node = nodes.literal(rawtext, text, classes=["cli-metavar"]) + return [node], [] + + +def cli_command_role( + name: str, + rawtext: str, + text: str, + lineno: int, + inliner: Inliner | None, + options: dict[str, t.Any] | None = None, + content: list[str] | None = None, +) -> tuple[list[nodes.Node], list[nodes.system_message]]: + """Role for CLI command names like sync or add. + + Generates a literal node with 'cli-command' CSS class for styling. + + Parameters + ---------- + name : str + Local name of the role used in document. + rawtext : str + Full interpreted text including role markup. + text : str + Content between backticks. + lineno : int + Line number. + inliner : Inliner | None + Object that called the role. + options : dict | None + Options from role directive. + content : list | None + Content from role directive. + + Returns + ------- + tuple[list[nodes.Node], list[nodes.system_message]] + Nodes to insert and any messages. + + Examples + -------- + >>> node_list, messages = cli_command_role( + ... "cli-command", ":cli-command:`sync`", "sync", + ... 1, None + ... ) + >>> node_list[0]["classes"] + ['cli-command'] + >>> node_list[0].astext() + 'sync' + + >>> node_list, messages = cli_command_role( + ... "cli-command", ":cli-command:`myapp`", "myapp", + ... 1, None + ... ) + >>> "cli-command" in node_list[0]["classes"] + True + """ + options = normalize_options(options) + node = nodes.literal(rawtext, text, classes=["cli-command"]) + return [node], [] + + +def cli_default_role( + name: str, + rawtext: str, + text: str, + lineno: int, + inliner: Inliner | None, + options: dict[str, t.Any] | None = None, + content: list[str] | None = None, +) -> tuple[list[nodes.Node], list[nodes.system_message]]: + """Role for CLI default values like None or "default". + + Generates a literal node with 'cli-default' CSS class for styling. 
+ + Parameters + ---------- + name : str + Local name of the role used in document. + rawtext : str + Full interpreted text including role markup. + text : str + Content between backticks. + lineno : int + Line number. + inliner : Inliner | None + Object that called the role. + options : dict | None + Options from role directive. + content : list | None + Content from role directive. + + Returns + ------- + tuple[list[nodes.Node], list[nodes.system_message]] + Nodes to insert and any messages. + + Examples + -------- + >>> node_list, messages = cli_default_role( + ... "cli-default", ":cli-default:`None`", "None", + ... 1, None + ... ) + >>> node_list[0]["classes"] + ['cli-default'] + >>> node_list[0].astext() + 'None' + + >>> node_list, messages = cli_default_role( + ... "cli-default", ':cli-default:`"auto"`', '"auto"', + ... 1, None + ... ) + >>> "cli-default" in node_list[0]["classes"] + True + """ + options = normalize_options(options) + node = nodes.literal(rawtext, text, classes=["cli-default"]) + return [node], [] + + +def cli_choice_role( + name: str, + rawtext: str, + text: str, + lineno: int, + inliner: Inliner | None, + options: dict[str, t.Any] | None = None, + content: list[str] | None = None, +) -> tuple[list[nodes.Node], list[nodes.system_message]]: + """Role for CLI choice values like json or yaml. + + Generates a literal node with 'cli-choice' CSS class for styling. + + Parameters + ---------- + name : str + Local name of the role used in document. + rawtext : str + Full interpreted text including role markup. + text : str + Content between backticks. + lineno : int + Line number. + inliner : Inliner | None + Object that called the role. + options : dict | None + Options from role directive. + content : list | None + Content from role directive. + + Returns + ------- + tuple[list[nodes.Node], list[nodes.system_message]] + Nodes to insert and any messages. + + Examples + -------- + >>> node_list, messages = cli_choice_role( + ... 
"cli-choice", ":cli-choice:`json`", "json", + ... 1, None + ... ) + >>> node_list[0]["classes"] + ['cli-choice'] + >>> node_list[0].astext() + 'json' + + >>> node_list, messages = cli_choice_role( + ... "cli-choice", ":cli-choice:`yaml`", "yaml", + ... 1, None + ... ) + >>> "cli-choice" in node_list[0]["classes"] + True + """ + options = normalize_options(options) + node = nodes.literal(rawtext, text, classes=["cli-choice"]) + return [node], [] + + +def register_roles() -> None: + """Register all CLI roles with docutils. + + This function registers the following roles: + - cli-option: For CLI options (--verbose, -h) + - cli-metavar: For metavar placeholders (FILE, PATH) + - cli-command: For command names (sync, add) + - cli-default: For default values (None, "default") + - cli-choice: For choice values (json, yaml) + + Examples + -------- + >>> register_roles() + >>> # Roles are now available in docutils RST parsing + """ + roles.register_local_role("cli-option", cli_option_role) # type: ignore[arg-type] + roles.register_local_role("cli-metavar", cli_metavar_role) # type: ignore[arg-type] + roles.register_local_role("cli-command", cli_command_role) # type: ignore[arg-type] + roles.register_local_role("cli-default", cli_default_role) # type: ignore[arg-type] + roles.register_local_role("cli-choice", cli_choice_role) # type: ignore[arg-type] diff --git a/docs/_ext/cli_usage_lexer.py b/docs/_ext/cli_usage_lexer.py new file mode 100644 index 0000000..40170e3 --- /dev/null +++ b/docs/_ext/cli_usage_lexer.py @@ -0,0 +1,115 @@ +"""Pygments lexer for CLI usage/help output. + +This module provides a custom Pygments lexer for highlighting command-line +usage text typically generated by argparse, getopt, or similar libraries. 
+""" + +from __future__ import annotations + +from pygments.lexer import RegexLexer, bygroups, include +from pygments.token import Generic, Name, Operator, Punctuation, Text, Whitespace + + +class CLIUsageLexer(RegexLexer): + """Lexer for CLI usage/help text (argparse, etc.). + + Highlights usage patterns including options, arguments, and meta-variables. + + Examples + -------- + >>> from pygments.token import Token + >>> lexer = CLIUsageLexer() + >>> tokens = list(lexer.get_tokens("usage: cmd [-h]")) + >>> tokens[0] + (Token.Generic.Heading, 'usage:') + >>> tokens[2] + (Token.Name.Label, 'cmd') + """ + + name = "CLI Usage" + aliases = ["cli-usage", "usage"] # noqa: RUF012 + filenames: list[str] = [] # noqa: RUF012 + mimetypes = ["text/x-cli-usage"] # noqa: RUF012 + + tokens = { # noqa: RUF012 + "root": [ + # "usage:" at start of line + (r"^(usage:)(\s+)", bygroups(Generic.Heading, Whitespace)), # type: ignore[no-untyped-call] + # Continuation lines (leading whitespace for wrapped usage) + (r"^(\s+)(?=\S)", Whitespace), + include("inline"), + ], + "inline": [ + # Whitespace + (r"\s+", Whitespace), + # Long options with = value (e.g., --log-level=VALUE) + ( + r"(--[a-zA-Z0-9][-a-zA-Z0-9]*)(=)([A-Z][A-Z0-9_]*|[a-z][-a-z0-9]*)", + bygroups(Name.Tag, Operator, Name.Variable), # type: ignore[no-untyped-call] + ), + # Long options standalone + (r"--[a-zA-Z0-9][-a-zA-Z0-9]*", Name.Tag), + # Short options with space-separated value (e.g., -S socket-path) + ( + r"(-[a-zA-Z0-9])(\s+)([A-Z][A-Z0-9_]*|[a-z][-a-z0-9]*)", + bygroups(Name.Attribute, Whitespace, Name.Variable), # type: ignore[no-untyped-call] + ), + # Short options standalone + (r"-[a-zA-Z0-9]", Name.Attribute), + # UPPERCASE meta-variables (COMMAND, FILE, PATH) + (r"\b[A-Z][A-Z0-9_]+\b", Name.Constant), + # Opening bracket - enter optional state + (r"\[", Punctuation, "optional"), + # Closing bracket (fallback for unmatched) + (r"\]", Punctuation), + # Choice separator (pipe) + (r"\|", Operator), + # Parentheses 
for grouping + (r"[()]", Punctuation), + # Positional/command names (lowercase with dashes) + (r"\b[a-z][-a-z0-9]*\b", Name.Label), + # Catch-all for any other text + (r"[^\s\[\]|()]+", Text), + ], + "optional": [ + # Nested optional bracket + (r"\[", Punctuation, "#push"), + # End optional + (r"\]", Punctuation, "#pop"), + # Contents use inline rules + include("inline"), + ], + } + + +def tokenize_usage(text: str) -> list[tuple[str, str]]: + """Tokenize usage text and return list of (token_type, value) tuples. + + Parameters + ---------- + text : str + CLI usage text to tokenize. + + Returns + ------- + list[tuple[str, str]] + List of (token_type_name, text_value) tuples. + + Examples + -------- + >>> result = tokenize_usage("usage: cmd [-h]") + >>> result[0] + ('Token.Generic.Heading', 'usage:') + >>> result[2] + ('Token.Name.Label', 'cmd') + >>> result[4] + ('Token.Punctuation', '[') + >>> result[5] + ('Token.Name.Attribute', '-h') + >>> result[6] + ('Token.Punctuation', ']') + """ + lexer = CLIUsageLexer() + return [ + (str(tok_type), tok_value) for tok_type, tok_value in lexer.get_tokens(text) + ] diff --git a/docs/_ext/conftest.py b/docs/_ext/conftest.py new file mode 100644 index 0000000..d026309 --- /dev/null +++ b/docs/_ext/conftest.py @@ -0,0 +1,15 @@ +"""Pytest configuration for docs/_ext doctests. + +This module sets up sys.path so that sphinx_argparse_neo and other extension +modules can be imported correctly during pytest doctest collection. 
+""" + +from __future__ import annotations + +import pathlib +import sys + +# Add docs/_ext to sys.path so sphinx_argparse_neo can import itself +_ext_dir = pathlib.Path(__file__).parent +if str(_ext_dir) not in sys.path: + sys.path.insert(0, str(_ext_dir)) diff --git a/docs/_ext/sphinx_argparse_neo/__init__.py b/docs/_ext/sphinx_argparse_neo/__init__.py new file mode 100644 index 0000000..5fa8dd9 --- /dev/null +++ b/docs/_ext/sphinx_argparse_neo/__init__.py @@ -0,0 +1,101 @@ +"""sphinx_argparse_neo - Modern sphinx-argparse replacement. + +A Sphinx extension for documenting argparse-based CLI tools that: +- Works with Sphinx 8.x AND 9.x (no autodoc.mock dependency) +- Fixes long-standing sphinx-argparse issues (TOC pollution, heading levels) +- Provides configurable output (rubrics vs sections, flattened subcommands) +- Supports extensibility via renderer classes +- Text processing utilities (ANSI stripping) +""" + +from __future__ import annotations + +import typing as t + +from sphinx_argparse_neo.directive import ArgparseDirective +from sphinx_argparse_neo.nodes import ( + argparse_argument, + argparse_group, + argparse_program, + argparse_subcommand, + argparse_subcommands, + argparse_usage, + depart_argparse_argument_html, + depart_argparse_group_html, + depart_argparse_program_html, + depart_argparse_subcommand_html, + depart_argparse_subcommands_html, + depart_argparse_usage_html, + visit_argparse_argument_html, + visit_argparse_group_html, + visit_argparse_program_html, + visit_argparse_subcommand_html, + visit_argparse_subcommands_html, + visit_argparse_usage_html, +) +from sphinx_argparse_neo.utils import strip_ansi + +__all__ = [ + "ArgparseDirective", + "strip_ansi", +] + +if t.TYPE_CHECKING: + from sphinx.application import Sphinx + +__version__ = "1.0.0" + + +def setup(app: Sphinx) -> dict[str, t.Any]: + """Register the argparse directive and configuration options. + + Parameters + ---------- + app : Sphinx + The Sphinx application object. 
+ + Returns + ------- + dict[str, t.Any] + Extension metadata. + """ + # Configuration options + app.add_config_value("argparse_group_title_prefix", "", "html") + app.add_config_value("argparse_show_defaults", True, "html") + app.add_config_value("argparse_show_choices", True, "html") + app.add_config_value("argparse_show_types", True, "html") + + # Register custom nodes + app.add_node( + argparse_program, + html=(visit_argparse_program_html, depart_argparse_program_html), + ) + app.add_node( + argparse_usage, + html=(visit_argparse_usage_html, depart_argparse_usage_html), + ) + app.add_node( + argparse_group, + html=(visit_argparse_group_html, depart_argparse_group_html), + ) + app.add_node( + argparse_argument, + html=(visit_argparse_argument_html, depart_argparse_argument_html), + ) + app.add_node( + argparse_subcommands, + html=(visit_argparse_subcommands_html, depart_argparse_subcommands_html), + ) + app.add_node( + argparse_subcommand, + html=(visit_argparse_subcommand_html, depart_argparse_subcommand_html), + ) + + # Register directive + app.add_directive("argparse", ArgparseDirective) + + return { + "version": __version__, + "parallel_read_safe": True, + "parallel_write_safe": True, + } diff --git a/docs/_ext/sphinx_argparse_neo/compat.py b/docs/_ext/sphinx_argparse_neo/compat.py new file mode 100644 index 0000000..074990f --- /dev/null +++ b/docs/_ext/sphinx_argparse_neo/compat.py @@ -0,0 +1,291 @@ +"""Compatibility utilities for module loading. + +This module provides utilities for loading Python modules safely, +including mock handling for imports that may fail during documentation +builds. + +Unlike sphinx-argparse, this module does NOT depend on autodoc's mock +functionality, which moved in Sphinx 9.x. 
+""" + +from __future__ import annotations + +import contextlib +import importlib +import sys +import typing as t + +if t.TYPE_CHECKING: + import argparse + from collections.abc import Iterator + + +class MockModule: + """Simple mock for unavailable imports. + + This class provides a minimal mock that can be used as a placeholder + for modules that aren't available during documentation builds. + + Parameters + ---------- + name : str + The module name being mocked. + + Examples + -------- + >>> mock = MockModule("mypackage.submodule") + >>> mock.__name__ + 'mypackage.submodule' + >>> child = mock.child_attr + >>> child.__name__ + 'mypackage.submodule.child_attr' + >>> callable(mock.some_function) + True + >>> mock.some_function() + + """ + + def __init__(self, name: str) -> None: + """Initialize the mock module.""" + self.__name__ = name + self._name = name + + def __repr__(self) -> str: + """Return string representation.""" + return f"" + + def __getattr__(self, name: str) -> MockModule: + """Return a child mock for any attribute access. + + Parameters + ---------- + name : str + The attribute name. + + Returns + ------- + MockModule + A new mock for the child attribute. + """ + return MockModule(f"{self._name}.{name}") + + def __call__(self, *args: t.Any, **kwargs: t.Any) -> MockModule: + """Return self when called as a function. + + Parameters + ---------- + *args : t.Any + Positional arguments (ignored). + **kwargs : t.Any + Keyword arguments (ignored). + + Returns + ------- + MockModule + Self. + """ + return self + + +@contextlib.contextmanager +def mock_imports(modules: list[str]) -> Iterator[None]: + """Context manager to mock missing imports. + + This provides a simple way to temporarily add mock modules to + sys.modules, allowing imports to succeed during documentation builds + even when the actual modules aren't available. + + Parameters + ---------- + modules : list[str] + List of module names to mock. 
+ + Yields + ------ + None + Context manager yields nothing. + + Examples + -------- + >>> import sys + >>> "fake_module" in sys.modules + False + >>> with mock_imports(["fake_module", "fake_module.sub"]): + ... import fake_module + ... fake_module.__name__ + 'fake_module' + >>> "fake_module" in sys.modules + False + """ + mocked: dict[str, MockModule] = {} + + for name in modules: + if name not in sys.modules: + mocked[name] = MockModule(name) + sys.modules[name] = mocked[name] # type: ignore[assignment] + + try: + yield + finally: + for name in mocked: + del sys.modules[name] + + +def import_module(module_name: str) -> t.Any: + """Import a module by name. + + Parameters + ---------- + module_name : str + The fully qualified module name. + + Returns + ------- + t.Any + The imported module. + + Raises + ------ + ImportError + If the module cannot be imported. + + Examples + -------- + >>> mod = import_module("argparse") + >>> hasattr(mod, "ArgumentParser") + True + """ + return importlib.import_module(module_name) + + +def get_parser_from_module( + module_name: str, + func_name: str, + mock_modules: list[str] | None = None, +) -> argparse.ArgumentParser: + """Import a module and call a function to get an ArgumentParser. + + Parameters + ---------- + module_name : str + The module containing the parser factory function. + func_name : str + The name of the function that returns an ArgumentParser. + Can be a dotted path like "Class.method". + mock_modules : list[str] | None + Optional list of module names to mock during import. + + Returns + ------- + argparse.ArgumentParser + The argument parser returned by the function. + + Raises + ------ + ImportError + If the module cannot be imported. + AttributeError + If the function is not found. + TypeError + If the function doesn't return an ArgumentParser. 
+ + Examples + -------- + Load a parser from a module with a factory function: + + >>> import argparse + >>> import sys + >>> # Create a test module with a parser factory + >>> import types + >>> test_mod = types.ModuleType("_test_parser_mod") + >>> def _create_parser(): + ... p = argparse.ArgumentParser(prog="test") + ... return p + >>> test_mod.create_parser = _create_parser + >>> sys.modules["_test_parser_mod"] = test_mod + >>> parser = get_parser_from_module("_test_parser_mod", "create_parser") + >>> parser.prog + 'test' + >>> hasattr(parser, 'parse_args') + True + >>> del sys.modules["_test_parser_mod"] + """ + ctx = mock_imports(mock_modules) if mock_modules else contextlib.nullcontext() + + with ctx: + module = import_module(module_name) + + # Handle dotted paths like "Class.method" + obj = module + for part in func_name.split("."): + obj = getattr(obj, part) + + # Call the function if it's callable + parser = obj() if callable(obj) else obj + + # Validate the return type at runtime + import argparse as argparse_module + + if not isinstance(parser, argparse_module.ArgumentParser): + msg = ( + f"{module_name}:{func_name} returned {type(parser).__name__}, " + f"expected ArgumentParser" + ) + raise TypeError(msg) + + return parser + + +def get_parser_from_entry_point( + entry_point: str, + mock_modules: list[str] | None = None, +) -> argparse.ArgumentParser: + """Get an ArgumentParser from a setuptools-style entry point string. + + Parameters + ---------- + entry_point : str + Entry point in the format "module:function" or "module:Class.method". + mock_modules : list[str] | None + Optional list of module names to mock during import. + + Returns + ------- + argparse.ArgumentParser + The argument parser. + + Raises + ------ + ValueError + If the entry point format is invalid. 
+ + Examples + -------- + Load a parser using entry point syntax: + + >>> import argparse + >>> import sys + >>> import types + >>> test_mod = types.ModuleType("_test_ep_mod") + >>> def _create_parser(): + ... return argparse.ArgumentParser(prog="test-ep") + >>> test_mod.create_parser = _create_parser + >>> sys.modules["_test_ep_mod"] = test_mod + >>> parser = get_parser_from_entry_point("_test_ep_mod:create_parser") + >>> parser.prog + 'test-ep' + >>> del sys.modules["_test_ep_mod"] + + Invalid format raises ValueError: + + >>> get_parser_from_entry_point("no_colon") + Traceback (most recent call last): + ... + ValueError: Invalid entry point format: 'no_colon'. Expected 'module:function' + """ + if ":" not in entry_point: + msg = f"Invalid entry point format: {entry_point!r}. Expected 'module:function'" + raise ValueError(msg) + + module_name, func_name = entry_point.split(":", 1) + return get_parser_from_module(module_name, func_name, mock_modules) diff --git a/docs/_ext/sphinx_argparse_neo/directive.py b/docs/_ext/sphinx_argparse_neo/directive.py new file mode 100644 index 0000000..3ac9643 --- /dev/null +++ b/docs/_ext/sphinx_argparse_neo/directive.py @@ -0,0 +1,241 @@ +"""Sphinx directive for argparse documentation. + +This module provides the ArgparseDirective class that integrates +with Sphinx to generate documentation from ArgumentParser instances. +""" + +from __future__ import annotations + +import typing as t + +from docutils import nodes +from docutils.parsers.rst import directives +from sphinx.util.docutils import SphinxDirective +from sphinx_argparse_neo.compat import get_parser_from_module +from sphinx_argparse_neo.parser import extract_parser +from sphinx_argparse_neo.renderer import ArgparseRenderer, RenderConfig + +if t.TYPE_CHECKING: + import argparse + + +class ArgparseDirective(SphinxDirective): + """Sphinx directive for documenting argparse-based CLI tools. + + Usage + ----- + .. 
argparse:: + :module: myapp.cli + :func: create_parser + :prog: myapp + + Options + ------- + :module: + The Python module containing the parser factory function. + :func: + The function name that returns an ArgumentParser. + Can be a dotted path like "Class.method". + :prog: + Override the program name (optional). + :path: + Navigate to a specific subparser by path (e.g., "sync pull"). + :no-defaults: + Don't show default values (flag). + :no-description: + Don't show parser description (flag). + :no-epilog: + Don't show parser epilog (flag). + :mock-modules: + Comma-separated list of modules to mock during import. + + Examples + -------- + In RST documentation:: + + .. argparse:: + :module: myapp.cli + :func: create_parser + :prog: myapp + + :path: subcommand + """ + + has_content = True + required_arguments = 0 + optional_arguments = 0 + + option_spec: t.ClassVar[dict[str, t.Any]] = { + "module": directives.unchanged_required, + "func": directives.unchanged_required, + "prog": directives.unchanged, + "path": directives.unchanged, + "no-defaults": directives.flag, + "no-description": directives.flag, + "no-epilog": directives.flag, + "no-choices": directives.flag, + "no-types": directives.flag, + "mock-modules": directives.unchanged, + # sphinx-argparse compatibility options + "nosubcommands": directives.flag, + "nodefault": directives.flag, + "noepilog": directives.flag, + "nodescription": directives.flag, + } + + def run(self) -> list[nodes.Node]: + """Execute the directive and return docutils nodes. + + Returns + ------- + list[nodes.Node] + List of docutils nodes representing the CLI documentation. 
+ """ + # Get required options + module_name = self.options.get("module") + func_name = self.options.get("func") + + if not module_name or not func_name: + error = self.state_machine.reporter.error( + "argparse directive requires :module: and :func: options", + line=self.lineno, + ) + return [error] + + # Parse mock modules + mock_modules: list[str] | None = None + if "mock-modules" in self.options: + mock_modules = [m.strip() for m in self.options["mock-modules"].split(",")] + + # Load the parser + try: + parser = get_parser_from_module(module_name, func_name, mock_modules) + except Exception as e: + error = self.state_machine.reporter.error( + f"Failed to load parser from {module_name}:{func_name}: {e}", + line=self.lineno, + ) + return [error] + + # Override prog if specified + if "prog" in self.options: + parser.prog = self.options["prog"] + + # Navigate to subparser if path specified + if "path" in self.options: + subparser = self._navigate_to_subparser(parser, self.options["path"]) + if subparser is None: + error = self.state_machine.reporter.error( + f"Subparser path not found: {self.options['path']}", + line=self.lineno, + ) + return [error] + parser = subparser + + # Build render config from directive options and Sphinx config + config = self._build_render_config() + + # Extract parser info + parser_info = extract_parser(parser) + + # Apply directive-level overrides + # Handle both new-style and sphinx-argparse compatibility options + if "no-description" in self.options or "nodescription" in self.options: + parser_info = parser_info.__class__( + prog=parser_info.prog, + usage=parser_info.usage, + bare_usage=parser_info.bare_usage, + description=None, + epilog=parser_info.epilog, + argument_groups=parser_info.argument_groups, + subcommands=parser_info.subcommands, + subcommand_dest=parser_info.subcommand_dest, + ) + if "no-epilog" in self.options or "noepilog" in self.options: + parser_info = parser_info.__class__( + prog=parser_info.prog, + 
usage=parser_info.usage, + bare_usage=parser_info.bare_usage, + description=parser_info.description, + epilog=None, + argument_groups=parser_info.argument_groups, + subcommands=parser_info.subcommands, + subcommand_dest=parser_info.subcommand_dest, + ) + if "nosubcommands" in self.options: + parser_info = parser_info.__class__( + prog=parser_info.prog, + usage=parser_info.usage, + bare_usage=parser_info.bare_usage, + description=parser_info.description, + epilog=parser_info.epilog, + argument_groups=parser_info.argument_groups, + subcommands=None, + subcommand_dest=None, + ) + + # Render to nodes + renderer = ArgparseRenderer(config=config, state=self.state) + return renderer.render(parser_info) + + def _build_render_config(self) -> RenderConfig: + """Build RenderConfig from directive and Sphinx config options. + + Returns + ------- + RenderConfig + Configuration for the renderer. + """ + # Start with Sphinx config defaults + config = RenderConfig.from_sphinx_config(self.config) + + # Override with directive options + # Handle both new-style and sphinx-argparse compatibility options + if "no-defaults" in self.options or "nodefault" in self.options: + config.show_defaults = False + if "no-choices" in self.options: + config.show_choices = False + if "no-types" in self.options: + config.show_types = False + + return config + + def _navigate_to_subparser( + self, parser: argparse.ArgumentParser, path: str + ) -> argparse.ArgumentParser | None: + """Navigate to a nested subparser by path. + + Parameters + ---------- + parser : argparse.ArgumentParser + The root parser. + path : str + Space-separated path to the subparser (e.g., "sync pull"). + + Returns + ------- + argparse.ArgumentParser | None + The subparser, or None if not found. 
+ """ + import argparse as argparse_module + + current = parser + for name in path.split(): + # Find subparsers action + subparser_action = None + for action in current._actions: + if isinstance(action, argparse_module._SubParsersAction): + subparser_action = action + break + + if subparser_action is None: + return None + + # Find the named subparser + choices = subparser_action.choices or {} + if name not in choices: + return None + + current = choices[name] + + return current diff --git a/docs/_ext/sphinx_argparse_neo/nodes.py b/docs/_ext/sphinx_argparse_neo/nodes.py new file mode 100644 index 0000000..468b587 --- /dev/null +++ b/docs/_ext/sphinx_argparse_neo/nodes.py @@ -0,0 +1,647 @@ +"""Custom docutils node types for argparse documentation. + +This module defines custom node types that represent the structure of +CLI documentation, along with HTML visitor functions for rendering. +""" + +from __future__ import annotations + +import typing as t + +from docutils import nodes + +if t.TYPE_CHECKING: + from sphinx.writers.html5 import HTML5Translator + +# Import the lexer - use absolute import from parent package +import pathlib +import sys + +# Add parent directory to path for lexer import +_ext_dir = pathlib.Path(__file__).parent.parent +if str(_ext_dir) not in sys.path: + sys.path.insert(0, str(_ext_dir)) + +from argparse_lexer import ArgparseUsageLexer # noqa: E402 +from sphinx_argparse_neo.utils import strip_ansi # noqa: E402 + + +def _generate_argument_id(names: list[str], id_prefix: str = "") -> str: + """Generate unique ID for an argument based on its names. + + Creates a slug-style ID suitable for HTML anchors by: + 1. Stripping leading dashes from option names + 2. Joining multiple names with hyphens + 3. Prepending optional prefix for namespace isolation + + Parameters + ---------- + names : list[str] + List of argument names (e.g., ["-L", "--socket-name"]). + id_prefix : str + Optional prefix for uniqueness (e.g., "shell" -> "shell-L-socket-name"). 
+ + Returns + ------- + str + A slug-style ID suitable for HTML anchors. + + Examples + -------- + >>> _generate_argument_id(["-L"]) + 'L' + >>> _generate_argument_id(["--help"]) + 'help' + >>> _generate_argument_id(["-v", "--verbose"]) + 'v-verbose' + >>> _generate_argument_id(["-L"], "shell") + 'shell-L' + >>> _generate_argument_id(["filename"]) + 'filename' + >>> _generate_argument_id([]) + '' + """ + clean_names = [name.lstrip("-") for name in names if name.lstrip("-")] + if not clean_names: + return "" + name_part = "-".join(clean_names) + return f"{id_prefix}-{name_part}" if id_prefix else name_part + + +def _token_to_css_class(token_type: t.Any) -> str: + """Map a Pygments token type to its CSS class abbreviation. + + Pygments uses hierarchical token names like Token.Name.Attribute. + These map to CSS classes using abbreviations of the last two parts: + - Token.Name.Attribute β†’ 'na' (Name.Attribute) + - Token.Generic.Heading β†’ 'gh' (Generic.Heading) + - Token.Punctuation β†’ 'p' (just Punctuation) + + Parameters + ---------- + token_type : Any + A Pygments token type (from pygments.token). + + Returns + ------- + str + CSS class abbreviation, or empty string if not mappable. 
+ + Examples + -------- + >>> from pygments.token import Token + >>> _token_to_css_class(Token.Name.Attribute) + 'na' + >>> _token_to_css_class(Token.Generic.Heading) + 'gh' + >>> _token_to_css_class(Token.Punctuation) + 'p' + >>> _token_to_css_class(Token.Text.Whitespace) + 'tw' + """ + type_str = str(token_type) + # Token string looks like "Token.Name.Attribute" or "Token.Punctuation" + parts = type_str.split(".") + + if len(parts) >= 3: + # Token.Name.Attribute -> "na" (first char of each of last two parts) + return parts[-2][0].lower() + parts[-1][0].lower() + elif len(parts) == 2: + # Token.Punctuation -> "p" (first char of last part) + return parts[-1][0].lower() + return "" + + +def _highlight_usage(usage_text: str, encode: t.Callable[[str], str]) -> str: + """Tokenize usage text and wrap tokens in highlighted span elements. + + Uses ArgparseUsageLexer to tokenize the usage string, then wraps each + token in a with the appropriate CSS class for styling. + + Parameters + ---------- + usage_text : str + The usage string to highlight (should include "usage: " prefix). + encode : Callable[[str], str] + HTML encoding function (typically translator.encode). + + Returns + ------- + str + HTML string with tokens wrapped in styled elements. + + Examples + -------- + >>> def mock_encode(s: str) -> str: + ... 
return s.replace("&", "&").replace("<", "<") + >>> html = _highlight_usage("usage: cmd [-h]", mock_encode) + >>> 'usage:' in html + True + >>> 'cmd' in html + True + >>> '-h' in html + True + """ + lexer = ArgparseUsageLexer() + parts: list[str] = [] + + for tok_type, tok_value in lexer.get_tokens(usage_text): + if not tok_value: + continue + + css_class = _token_to_css_class(tok_type) + escaped = encode(tok_value) + type_str = str(tok_type).lower() + + # Skip wrapping for whitespace and plain text tokens + if css_class and "whitespace" not in type_str and "text" not in type_str: + parts.append(f'{escaped}') + else: + parts.append(escaped) + + return "".join(parts) + + +def _highlight_argument_names( + names: list[str], metavar: str | None, encode: t.Callable[[str], str] +) -> str: + """Highlight argument names and metavar with appropriate CSS classes. + + Short options (-h) get class 'na' (Name.Attribute). + Long options (--help) get class 'nt' (Name.Tag). + Positional arguments get class 'nl' (Name.Label). + Metavars get class 'nv' (Name.Variable). + + Parameters + ---------- + names : list[str] + List of argument names (e.g., ["-v", "--verbose"]). + metavar : str | None + Optional metavar (e.g., "FILE", "PATH"). + encode : Callable[[str], str] + HTML encoding function. + + Returns + ------- + str + HTML string with highlighted argument signature. + + Examples + -------- + >>> def mock_encode(s: str) -> str: + ... 
return s + >>> html = _highlight_argument_names(["-h", "--help"], None, mock_encode) + >>> '-h' in html + True + >>> '--help' in html + True + >>> html = _highlight_argument_names(["--output"], "FILE", mock_encode) + >>> 'FILE' in html + True + >>> html = _highlight_argument_names(["sync"], None, mock_encode) + >>> 'sync' in html + True + """ + sig_parts: list[str] = [] + + for name in names: + escaped = encode(name) + if name.startswith("--"): + sig_parts.append(f'{escaped}') + elif name.startswith("-"): + sig_parts.append(f'{escaped}') + else: + # Positional argument or subcommand + sig_parts.append(f'{escaped}') + + result = ", ".join(sig_parts) + + if metavar: + escaped_metavar = encode(metavar) + result = f'{result} {escaped_metavar}' + + return result + + +class argparse_program(nodes.General, nodes.Element): + """Root node for an argparse program documentation block. + + Attributes + ---------- + prog : str + The program name. + + Examples + -------- + >>> node = argparse_program() + >>> node["prog"] = "myapp" + >>> node["prog"] + 'myapp' + """ + + pass + + +class argparse_usage(nodes.General, nodes.Element): + """Node for displaying program usage. + + Contains the usage string as a literal block. + + Examples + -------- + >>> node = argparse_usage() + >>> node["usage"] = "myapp [-h] [--verbose] command" + >>> node["usage"] + 'myapp [-h] [--verbose] command' + """ + + pass + + +class argparse_group(nodes.General, nodes.Element): + """Node for an argument group (positional, optional, or custom). + + Attributes + ---------- + title : str + The group title. + description : str | None + Optional group description. + + Examples + -------- + >>> node = argparse_group() + >>> node["title"] = "Output Options" + >>> node["title"] + 'Output Options' + """ + + pass + + +class argparse_argument(nodes.Part, nodes.Element): + """Node for a single CLI argument. + + Attributes + ---------- + names : list[str] + Argument names/flags. + help : str | None + Help text. 
+ default : str | None + Default value string. + choices : list[str] | None + Available choices. + required : bool + Whether the argument is required. + metavar : str | None + Metavar for display. + + Examples + -------- + >>> node = argparse_argument() + >>> node["names"] = ["-v", "--verbose"] + >>> node["names"] + ['-v', '--verbose'] + """ + + pass + + +class argparse_subcommands(nodes.General, nodes.Element): + """Container node for subcommands section. + + Examples + -------- + >>> node = argparse_subcommands() + >>> node["title"] = "Commands" + >>> node["title"] + 'Commands' + """ + + pass + + +class argparse_subcommand(nodes.General, nodes.Element): + """Node for a single subcommand. + + Attributes + ---------- + name : str + Subcommand name. + aliases : list[str] + Subcommand aliases. + help : str | None + Subcommand help text. + + Examples + -------- + >>> node = argparse_subcommand() + >>> node["name"] = "sync" + >>> node["aliases"] = ["s"] + >>> node["name"] + 'sync' + """ + + pass + + +# HTML Visitor Functions + + +def visit_argparse_program_html(self: HTML5Translator, node: argparse_program) -> None: + """Visit argparse_program node - start program container. + + Parameters + ---------- + self : HTML5Translator + The Sphinx HTML translator. + node : argparse_program + The program node being visited. + """ + prog = node.get("prog", "") + self.body.append(f'
\n') + + +def depart_argparse_program_html(self: HTML5Translator, node: argparse_program) -> None: + """Depart argparse_program node - close program container. + + Parameters + ---------- + self : HTML5Translator + The Sphinx HTML translator. + node : argparse_program + The program node being departed. + """ + self.body.append("
\n") + + +def visit_argparse_usage_html(self: HTML5Translator, node: argparse_usage) -> None: + """Visit argparse_usage node - render usage block with syntax highlighting. + + The usage text is tokenized using ArgparseUsageLexer and wrapped in + styled elements for semantic highlighting of options, metavars, + commands, and punctuation. + + Parameters + ---------- + self : HTML5Translator + The Sphinx HTML translator. + node : argparse_usage + The usage node being visited. + """ + usage = strip_ansi(node.get("usage", "")) + # Add both argparse-usage class and highlight class for CSS targeting + self.body.append('
')
+    # Prepend "usage: " and highlight the full usage string
+    highlighted = _highlight_usage(f"usage: {usage}", self.encode)
+    self.body.append(highlighted)
+
+
+def depart_argparse_usage_html(self: HTML5Translator, node: argparse_usage) -> None:
+    """Depart argparse_usage node - close usage block.
+
+    Parameters
+    ----------
+    self : HTML5Translator
+        The Sphinx HTML translator.
+    node : argparse_usage
+        The usage node being departed.
+    """
+    self.body.append("
\n") + + +def visit_argparse_group_html(self: HTML5Translator, node: argparse_group) -> None: + """Visit argparse_group node - start argument group. + + The title is now rendered by the parent section node, so this visitor + only handles the group container and description. + + Parameters + ---------- + self : HTML5Translator + The Sphinx HTML translator. + node : argparse_group + The group node being visited. + """ + title = node.get("title", "") + group_id = title.lower().replace(" ", "-") if title else "arguments" + self.body.append(f'
\n') + # Title rendering removed - parent section now provides the heading + description = node.get("description") + if description: + self.body.append( + f'

{self.encode(description)}

\n' + ) + self.body.append('
\n') + + +def depart_argparse_group_html(self: HTML5Translator, node: argparse_group) -> None: + """Depart argparse_group node - close argument group. + + Parameters + ---------- + self : HTML5Translator + The Sphinx HTML translator. + node : argparse_group + The group node being departed. + """ + self.body.append("
\n") + self.body.append("
\n") + + +def visit_argparse_argument_html( + self: HTML5Translator, node: argparse_argument +) -> None: + """Visit argparse_argument node - render argument entry with highlighting. + + Argument names are highlighted with semantic CSS classes: + - Short options (-h) get class 'na' (Name.Attribute) + - Long options (--help) get class 'nt' (Name.Tag) + - Positional arguments get class 'nl' (Name.Label) + - Metavars get class 'nv' (Name.Variable) + + The argument is wrapped in a container div with a unique ID for linking. + A headerlink anchor (ΒΆ) is added for direct navigation. + + Parameters + ---------- + self : HTML5Translator + The Sphinx HTML translator. + node : argparse_argument + The argument node being visited. + """ + names: list[str] = node.get("names", []) + metavar = node.get("metavar") + id_prefix: str = node.get("id_prefix", "") + + # Generate unique ID for this argument + arg_id = _generate_argument_id(names, id_prefix) + + # Open wrapper div with ID for linking + if arg_id: + self.body.append(f'
\n') + else: + self.body.append('
\n') + + # Build the argument signature with syntax highlighting + highlighted_sig = _highlight_argument_names(names, metavar, self.encode) + + # Add headerlink anchor inside dt for navigation + headerlink = "" + if arg_id: + headerlink = f'ΒΆ' + + self.body.append( + f'
{highlighted_sig}{headerlink}
\n' + ) + self.body.append('
') + + # Add help text + help_text = node.get("help") + if help_text: + self.body.append(f"

{self.encode(help_text)}

") + + +def depart_argparse_argument_html( + self: HTML5Translator, node: argparse_argument +) -> None: + """Depart argparse_argument node - close argument entry. + + Adds default, choices, and type information if present. + Default values are wrapped in ```` for styled display. + + Parameters + ---------- + self : HTML5Translator + The Sphinx HTML translator. + node : argparse_argument + The argument node being departed. + """ + # Build metadata as definition list items + default = node.get("default_string") + choices = node.get("choices") + type_name = node.get("type_name") + required = node.get("required", False) + + if default is not None or choices or type_name or required: + self.body.append('
\n') + + if default is not None: + self.body.append('
') + self.body.append('
Default
') + self.body.append( + f'
' + f'{self.encode(default)}
' + ) + self.body.append("
\n") + + if type_name: + self.body.append('
') + self.body.append('
Type
') + self.body.append( + f'
' + f'{self.encode(type_name)}
' + ) + self.body.append("
\n") + + if choices: + choices_str = ", ".join(str(c) for c in choices) + self.body.append('
') + self.body.append('
Choices
') + self.body.append( + f'
{self.encode(choices_str)}
' + ) + self.body.append("
\n") + + if required: + self.body.append('
Required
\n') + + self.body.append("
\n") + + self.body.append("
\n") + # Close wrapper div + self.body.append("
\n") + + +def visit_argparse_subcommands_html( + self: HTML5Translator, node: argparse_subcommands +) -> None: + """Visit argparse_subcommands node - start subcommands section. + + Parameters + ---------- + self : HTML5Translator + The Sphinx HTML translator. + node : argparse_subcommands + The subcommands node being visited. + """ + title = node.get("title", "Sub-commands") + self.body.append('
\n') + self.body.append( + f'

{self.encode(title)}

\n' + ) + + +def depart_argparse_subcommands_html( + self: HTML5Translator, node: argparse_subcommands +) -> None: + """Depart argparse_subcommands node - close subcommands section. + + Parameters + ---------- + self : HTML5Translator + The Sphinx HTML translator. + node : argparse_subcommands + The subcommands node being departed. + """ + self.body.append("
\n") + + +def visit_argparse_subcommand_html( + self: HTML5Translator, node: argparse_subcommand +) -> None: + """Visit argparse_subcommand node - start subcommand entry. + + Parameters + ---------- + self : HTML5Translator + The Sphinx HTML translator. + node : argparse_subcommand + The subcommand node being visited. + """ + name = node.get("name", "") + aliases: list[str] = node.get("aliases", []) + + self.body.append(f'
\n') + + # Subcommand header + header = name + if aliases: + alias_str = ", ".join(aliases) + header = f"{name} ({alias_str})" + self.body.append( + f'

{self.encode(header)}

\n' + ) + + # Help text + help_text = node.get("help") + if help_text: + self.body.append( + f'

{self.encode(help_text)}

\n' + ) + + +def depart_argparse_subcommand_html( + self: HTML5Translator, node: argparse_subcommand +) -> None: + """Depart argparse_subcommand node - close subcommand entry. + + Parameters + ---------- + self : HTML5Translator + The Sphinx HTML translator. + node : argparse_subcommand + The subcommand node being departed. + """ + self.body.append("
\n") diff --git a/docs/_ext/sphinx_argparse_neo/parser.py b/docs/_ext/sphinx_argparse_neo/parser.py new file mode 100644 index 0000000..f3a6db4 --- /dev/null +++ b/docs/_ext/sphinx_argparse_neo/parser.py @@ -0,0 +1,659 @@ +"""Argparse introspection - extract structured data from ArgumentParser. + +This module provides dataclasses and functions to introspect argparse +ArgumentParser instances and convert them into structured data suitable +for documentation rendering. +""" + +from __future__ import annotations + +import argparse +import dataclasses +import typing as t + +from sphinx_argparse_neo.utils import strip_ansi + +# Sentinel for "no default" (distinct from None which is a valid default) +NO_DEFAULT = object() + + +@dataclasses.dataclass +class ArgumentInfo: + """Represents a single CLI argument. + + Examples + -------- + >>> info = ArgumentInfo( + ... names=["-v", "--verbose"], + ... help="Enable verbose output", + ... default=False, + ... default_string="False", + ... choices=None, + ... required=False, + ... metavar=None, + ... nargs=None, + ... action="store_true", + ... type_name=None, + ... const=True, + ... dest="verbose", + ... ) + >>> info.names + ['-v', '--verbose'] + >>> info.is_positional + False + """ + + names: list[str] + help: str | None + default: t.Any + default_string: str | None + choices: list[t.Any] | None + required: bool + metavar: str | None + nargs: str | int | None + action: str + type_name: str | None + const: t.Any + dest: str + + @property + def is_positional(self) -> bool: + """Return True if this is a positional argument. + + Examples + -------- + >>> ArgumentInfo( + ... names=["filename"], + ... help=None, + ... default=None, + ... default_string=None, + ... choices=None, + ... required=True, + ... metavar=None, + ... nargs=None, + ... action="store", + ... type_name=None, + ... const=None, + ... dest="filename", + ... ).is_positional + True + >>> ArgumentInfo( + ... names=["-f", "--file"], + ... help=None, + ... 
default=None, + ... default_string=None, + ... choices=None, + ... required=False, + ... metavar=None, + ... nargs=None, + ... action="store", + ... type_name=None, + ... const=None, + ... dest="file", + ... ).is_positional + False + """ + return bool(self.names) and not self.names[0].startswith("-") + + +@dataclasses.dataclass +class MutuallyExclusiveGroup: + """Arguments that cannot be used together. + + Examples + -------- + >>> group = MutuallyExclusiveGroup(arguments=[], required=True) + >>> group.required + True + """ + + arguments: list[ArgumentInfo] + required: bool + + +@dataclasses.dataclass +class ArgumentGroup: + """Named group of arguments. + + Examples + -------- + >>> group = ArgumentGroup( + ... title="Output Options", + ... description="Control output format", + ... arguments=[], + ... mutually_exclusive=[], + ... ) + >>> group.title + 'Output Options' + """ + + title: str + description: str | None + arguments: list[ArgumentInfo] + mutually_exclusive: list[MutuallyExclusiveGroup] + + +@dataclasses.dataclass +class SubcommandInfo: + """A subparser/subcommand. + + Examples + -------- + >>> sub = SubcommandInfo( + ... name="sync", + ... aliases=["s"], + ... help="Synchronize repositories", + ... parser=None, # type: ignore[arg-type] + ... ) + >>> sub.aliases + ['s'] + """ + + name: str + aliases: list[str] + help: str | None + parser: ParserInfo # Recursive reference + + +@dataclasses.dataclass +class ParserInfo: + """Complete parsed ArgumentParser. + + Examples + -------- + >>> info = ParserInfo( + ... prog="myapp", + ... usage=None, + ... bare_usage="myapp [-h] command", + ... description="My application", + ... epilog=None, + ... argument_groups=[], + ... subcommands=None, + ... subcommand_dest=None, + ... 
) + >>> info.prog + 'myapp' + """ + + prog: str + usage: str | None + bare_usage: str + description: str | None + epilog: str | None + argument_groups: list[ArgumentGroup] + subcommands: list[SubcommandInfo] | None + subcommand_dest: str | None + + +def _format_default(default: t.Any) -> str | None: + """Format a default value for display. + + Parameters + ---------- + default : t.Any + The default value to format. + + Returns + ------- + str | None + Formatted string representation, or None if suppressed/unset. + + Examples + -------- + >>> _format_default(None) + 'None' + >>> _format_default("hello") + 'hello' + >>> _format_default(42) + '42' + >>> _format_default(argparse.SUPPRESS) is None + True + >>> _format_default([1, 2, 3]) + '[1, 2, 3]' + """ + if default is argparse.SUPPRESS: + return None + if default is None: + return "None" + if isinstance(default, str): + return default + return repr(default) + + +def _get_type_name(action: argparse.Action) -> str | None: + """Extract the type name from an action. + + Parameters + ---------- + action : argparse.Action + The argparse action to inspect. + + Returns + ------- + str | None + The type name, or None if no type is specified. + + Examples + -------- + >>> parser = argparse.ArgumentParser() + >>> action = parser.add_argument("--count", type=int) + >>> _get_type_name(action) + 'int' + >>> action2 = parser.add_argument("--name") + >>> _get_type_name(action2) is None + True + """ + if action.type is None: + return None + if hasattr(action.type, "__name__"): + return action.type.__name__ + return str(action.type) + + +def _get_action_name(action: argparse.Action) -> str: + """Get the action type name. + + Parameters + ---------- + action : argparse.Action + The argparse action to inspect. + + Returns + ------- + str + The action type name. 
+ + Examples + -------- + >>> parser = argparse.ArgumentParser() + >>> action = parser.add_argument("--verbose", action="store_true") + >>> _get_action_name(action) + 'store_true' + >>> action2 = parser.add_argument("--file") + >>> _get_action_name(action2) + 'store' + """ + # Map action classes to their string names + action_class = type(action).__name__ + action_map = { + "_StoreAction": "store", + "_StoreTrueAction": "store_true", + "_StoreFalseAction": "store_false", + "_StoreConstAction": "store_const", + "_AppendAction": "append", + "_AppendConstAction": "append_const", + "_CountAction": "count", + "_HelpAction": "help", + "_VersionAction": "version", + "_ExtendAction": "extend", + "BooleanOptionalAction": "boolean_optional", + } + return action_map.get(action_class, action_class.lower()) + + +def _extract_argument(action: argparse.Action) -> ArgumentInfo: + """Extract ArgumentInfo from an argparse Action. + + Parameters + ---------- + action : argparse.Action + The argparse action to extract information from. + + Returns + ------- + ArgumentInfo + Structured argument information. + + Examples + -------- + >>> parser = argparse.ArgumentParser() + >>> action = parser.add_argument( + ... "-v", "--verbose", + ... action="store_true", + ... help="Enable verbose mode", + ... 
) + >>> info = _extract_argument(action) + >>> info.names + ['-v', '--verbose'] + >>> info.action + 'store_true' + """ + # Determine names - option_strings for optionals, dest for positionals + names = list(action.option_strings) if action.option_strings else [action.dest] + + # Determine if required + required = action.required if hasattr(action, "required") else False + # Positional arguments are required by default (unless nargs makes them optional) + if not action.option_strings: + required = action.nargs not in ("?", "*", argparse.REMAINDER) + + # Format metavar + metavar = action.metavar + if isinstance(metavar, tuple): + metavar = " ".join(metavar) + + # Handle default + default = action.default + default_string = _format_default(default) + + return ArgumentInfo( + names=names, + help=action.help if action.help != argparse.SUPPRESS else None, + default=default if default is not argparse.SUPPRESS else NO_DEFAULT, + default_string=default_string, + choices=list(action.choices) if action.choices else None, + required=required, + metavar=metavar, + nargs=action.nargs, + action=_get_action_name(action), + type_name=_get_type_name(action), + const=action.const, + dest=action.dest, + ) + + +def _extract_mutex_groups( + parser: argparse.ArgumentParser, +) -> dict[int, MutuallyExclusiveGroup]: + """Extract mutually exclusive groups from a parser. + + Parameters + ---------- + parser : argparse.ArgumentParser + The parser to extract from. + + Returns + ------- + dict[int, MutuallyExclusiveGroup] + Mapping from action id to the MutuallyExclusiveGroup it belongs to. 
+ + Examples + -------- + Extract mutually exclusive groups from a parser with one group: + + >>> parser = argparse.ArgumentParser() + >>> group = parser.add_mutually_exclusive_group() + >>> _ = group.add_argument("--foo", help="Use foo") + >>> _ = group.add_argument("--bar", help="Use bar") + >>> mutex_map = _extract_mutex_groups(parser) + >>> len(mutex_map) + 2 + + Each action in the group maps to the same MutuallyExclusiveGroup: + + >>> values = list(mutex_map.values()) + >>> values[0] is values[1] + True + >>> len(values[0].arguments) + 2 + >>> [arg.names[0] for arg in values[0].arguments] + ['--foo', '--bar'] + + A parser without mutex groups returns an empty mapping: + + >>> parser2 = argparse.ArgumentParser() + >>> _ = parser2.add_argument("--verbose") + >>> _extract_mutex_groups(parser2) + {} + """ + mutex_map: dict[int, MutuallyExclusiveGroup] = {} + + for mutex_group in parser._mutually_exclusive_groups: + group_info = MutuallyExclusiveGroup( + arguments=[ + _extract_argument(action) + for action in mutex_group._group_actions + if action.help != argparse.SUPPRESS + ], + required=mutex_group.required, + ) + for action in mutex_group._group_actions: + mutex_map[id(action)] = group_info + + return mutex_map + + +def _extract_argument_groups( + parser: argparse.ArgumentParser, + hide_suppressed: bool = True, +) -> list[ArgumentGroup]: + """Extract argument groups from a parser. + + Parameters + ---------- + parser : argparse.ArgumentParser + The parser to extract from. + hide_suppressed : bool + Whether to hide arguments with SUPPRESS help. + + Returns + ------- + list[ArgumentGroup] + List of argument groups. 
+ + Examples + -------- + >>> parser = argparse.ArgumentParser(description="Test") + >>> _ = parser.add_argument("filename", help="Input file") + >>> _ = parser.add_argument("-v", "--verbose", action="store_true") + >>> groups = _extract_argument_groups(parser) + >>> len(groups) >= 2 # positional and optional groups + True + """ + mutex_map = _extract_mutex_groups(parser) + seen_mutex: set[int] = set() + groups: list[ArgumentGroup] = [] + + for group in parser._action_groups: + arguments: list[ArgumentInfo] = [] + mutex_groups: list[MutuallyExclusiveGroup] = [] + + for action in group._group_actions: + # Skip help action and suppressed actions + if isinstance(action, argparse._HelpAction): + continue + if hide_suppressed and action.help == argparse.SUPPRESS: + continue + # Skip subparser actions - handled separately + if isinstance(action, argparse._SubParsersAction): + continue + + # Check if this action is in a mutex group + if id(action) in mutex_map: + mutex_info = mutex_map[id(action)] + mutex_id = id(mutex_info) + if mutex_id not in seen_mutex: + seen_mutex.add(mutex_id) + mutex_groups.append(mutex_info) + else: + arguments.append(_extract_argument(action)) + + # Skip empty groups + if not arguments and not mutex_groups: + continue + + groups.append( + ArgumentGroup( + title=group.title or "", + description=group.description, + arguments=arguments, + mutually_exclusive=mutex_groups, + ) + ) + + return groups + + +def _extract_subcommands( + parser: argparse.ArgumentParser, + hide_suppressed: bool = True, +) -> tuple[list[SubcommandInfo] | None, str | None]: + """Extract subcommands from a parser. + + Parameters + ---------- + parser : argparse.ArgumentParser + The parser to extract from. + hide_suppressed : bool + Whether to hide subcommands with SUPPRESS help. + + Returns + ------- + tuple[list[SubcommandInfo] | None, str | None] + Tuple of (subcommands list, destination variable name). 
+ + Examples + -------- + >>> parser = argparse.ArgumentParser() + >>> subparsers = parser.add_subparsers(dest="command") + >>> _ = subparsers.add_parser("sync", help="Sync repos") + >>> _ = subparsers.add_parser("add", help="Add repo") + >>> subs, dest = _extract_subcommands(parser) + >>> dest + 'command' + >>> len(subs) + 2 + """ + for action in parser._actions: + if isinstance(action, argparse._SubParsersAction): + subcommands: list[SubcommandInfo] = [] + + # Get the choices (subparsers) + choices = action.choices or {} + + # Build reverse mapping of aliases + # action._parser_class might have name_parser_map with aliases + alias_map: dict[str, list[str]] = {} + seen_parsers: dict[int, str] = {} + + for name, subparser in choices.items(): + parser_id = id(subparser) + if parser_id in seen_parsers: + # This is an alias + primary = seen_parsers[parser_id] + if primary not in alias_map: + alias_map[primary] = [] + alias_map[primary].append(name) + else: + seen_parsers[parser_id] = name + + # Now extract subcommand info + processed: set[int] = set() + for name, subparser in choices.items(): + parser_id = id(subparser) + if parser_id in processed: + continue + processed.add(parser_id) + + # Get help text + help_text: str | None = None + if hasattr(action, "_choices_actions"): + for choice_action in action._choices_actions: + if choice_action.dest == name: + help_text = choice_action.help + break + + if hide_suppressed and help_text == argparse.SUPPRESS: + continue + + # Recursively extract parser info + sub_info = extract_parser(subparser, hide_suppressed=hide_suppressed) + + subcommands.append( + SubcommandInfo( + name=name, + aliases=alias_map.get(name, []), + help=help_text, + parser=sub_info, + ) + ) + + return subcommands, action.dest + + return None, None + + +def _generate_usage(parser: argparse.ArgumentParser) -> str: + """Generate the usage string for a parser. + + Parameters + ---------- + parser : argparse.ArgumentParser + The parser to generate usage for. 
+ + Returns + ------- + str + The bare usage string (without "usage: " prefix). + + Examples + -------- + >>> parser = argparse.ArgumentParser(prog="myapp") + >>> _ = parser.add_argument("-v", "--verbose", action="store_true") + >>> usage = _generate_usage(parser) + >>> "myapp" in usage + True + """ + # Use argparse's built-in formatter to generate usage + formatter = parser._get_formatter() + formatter.add_usage( + parser.usage, parser._actions, parser._mutually_exclusive_groups + ) + usage: str = formatter.format_help().strip() + + # Strip ANSI codes before checking prefix (handles FORCE_COLOR edge case) + usage = strip_ansi(usage) + + # Remove "usage: " prefix if present + if usage.lower().startswith("usage:"): + usage = usage[6:].strip() + + return usage + + +def extract_parser( + parser: argparse.ArgumentParser, + hide_suppressed: bool = True, +) -> ParserInfo: + """Extract complete parser information. + + Parameters + ---------- + parser : argparse.ArgumentParser + The parser to extract information from. + hide_suppressed : bool + Whether to hide arguments/subcommands with SUPPRESS help. + + Returns + ------- + ParserInfo + Complete structured parser information. + + Examples + -------- + >>> parser = argparse.ArgumentParser( + ... prog="myapp", + ... description="My application", + ... 
) + >>> _ = parser.add_argument("filename", help="Input file") + >>> _ = parser.add_argument("-v", "--verbose", action="store_true") + >>> info = extract_parser(parser) + >>> info.prog + 'myapp' + >>> info.description + 'My application' + >>> len(info.argument_groups) >= 1 + True + """ + subcommands, subcommand_dest = _extract_subcommands(parser, hide_suppressed) + + return ParserInfo( + prog=parser.prog, + usage=parser.usage, + bare_usage=_generate_usage(parser), + description=parser.description, + epilog=parser.epilog, + argument_groups=_extract_argument_groups(parser, hide_suppressed), + subcommands=subcommands, + subcommand_dest=subcommand_dest, + ) diff --git a/docs/_ext/sphinx_argparse_neo/renderer.py b/docs/_ext/sphinx_argparse_neo/renderer.py new file mode 100644 index 0000000..d2eb351 --- /dev/null +++ b/docs/_ext/sphinx_argparse_neo/renderer.py @@ -0,0 +1,604 @@ +"""Renderer - convert ParserInfo to docutils nodes. + +This module provides the ArgparseRenderer class that transforms +structured parser information into docutils nodes for documentation. +""" + +from __future__ import annotations + +import dataclasses +import typing as t + +from docutils import nodes +from docutils.statemachine import StringList +from sphinx_argparse_neo.nodes import ( + argparse_argument, + argparse_group, + argparse_program, + argparse_subcommand, + argparse_subcommands, + argparse_usage, +) +from sphinx_argparse_neo.parser import ( + ArgumentGroup, + ArgumentInfo, + MutuallyExclusiveGroup, + ParserInfo, + SubcommandInfo, +) +from sphinx_argparse_neo.utils import escape_rst_emphasis + +if t.TYPE_CHECKING: + from docutils.parsers.rst.states import RSTState + from sphinx.config import Config + + +@dataclasses.dataclass +class RenderConfig: + """Configuration for the renderer. 
+ + Examples + -------- + >>> config = RenderConfig() + >>> config.show_defaults + True + >>> config.group_title_prefix + '' + """ + + group_title_prefix: str = "" + show_defaults: bool = True + show_choices: bool = True + show_types: bool = True + + @classmethod + def from_sphinx_config(cls, config: Config) -> RenderConfig: + """Create RenderConfig from Sphinx configuration. + + Parameters + ---------- + config : Config + Sphinx configuration object. + + Returns + ------- + RenderConfig + Render configuration based on Sphinx config values. + """ + return cls( + group_title_prefix=getattr(config, "argparse_group_title_prefix", ""), + show_defaults=getattr(config, "argparse_show_defaults", True), + show_choices=getattr(config, "argparse_show_choices", True), + show_types=getattr(config, "argparse_show_types", True), + ) + + +class ArgparseRenderer: + """Render ParserInfo to docutils nodes. + + This class can be subclassed to customize rendering behavior. + Override individual methods to change how specific elements are rendered. + + Parameters + ---------- + config : RenderConfig + Rendering configuration. + state : RSTState | None + RST state for parsing nested RST content. + + Examples + -------- + >>> from sphinx_argparse_neo.parser import ParserInfo + >>> config = RenderConfig() + >>> renderer = ArgparseRenderer(config) + >>> info = ParserInfo( + ... prog="myapp", + ... usage=None, + ... bare_usage="myapp [-h]", + ... description="My app", + ... epilog=None, + ... argument_groups=[], + ... subcommands=None, + ... subcommand_dest=None, + ... ) + >>> result = renderer.render(info) + >>> isinstance(result, list) + True + """ + + def __init__( + self, + config: RenderConfig | None = None, + state: RSTState | None = None, + ) -> None: + """Initialize the renderer.""" + self.config = config or RenderConfig() + self.state = state + + @staticmethod + def _extract_id_prefix(prog: str) -> str: + """Extract subcommand from prog for unique section IDs. 
+ + Parameters + ---------- + prog : str + The program name, potentially with subcommand (e.g., "myapp sub"). + + Returns + ------- + str + The subcommand part for use as ID prefix, or empty string if none. + + Examples + -------- + >>> ArgparseRenderer._extract_id_prefix("myapp sub") + 'sub' + >>> ArgparseRenderer._extract_id_prefix("myapp") + '' + >>> ArgparseRenderer._extract_id_prefix("prog cmd") + 'cmd' + >>> ArgparseRenderer._extract_id_prefix("myapp sub cmd") + 'sub-cmd' + """ + parts = prog.split() + if len(parts) <= 1: + return "" + # Join remaining parts with hyphen for multi-level subcommands + return "-".join(parts[1:]) + + def render(self, parser_info: ParserInfo) -> list[nodes.Node]: + """Render a complete parser to docutils nodes. + + Parameters + ---------- + parser_info : ParserInfo + The parsed parser information. + + Returns + ------- + list[nodes.Node] + List of docutils nodes representing the documentation. + + Note + ---- + Sections for Usage and argument groups are emitted as siblings of + argparse_program rather than children. This allows Sphinx's + TocTreeCollector to discover them for inclusion in the table of + contents. + + The rendered structure is: + + - argparse_program (description only, no "examples:" part) + - section#usage (h3 "Usage" with usage block) + - section#positional-arguments (h3) + - section#options (h3) + + The "examples:" definition list in descriptions is left for + argparse_exemplar.py to transform into a proper Examples section. 
+ """ + result: list[nodes.Node] = [] + + # Create program container for description only + program_node = argparse_program() + program_node["prog"] = parser_info.prog + + # Add description (may contain "examples:" definition list for later + # transformation by argparse_exemplar.py) + if parser_info.description: + desc_nodes = self._parse_text(parser_info.description) + program_node.extend(desc_nodes) + + result.append(program_node) + + # Extract ID prefix from prog for unique section IDs + # e.g., "myapp sub" -> "sub", "myapp" -> "" + id_prefix = self._extract_id_prefix(parser_info.prog) + + # Add Usage section as sibling (for TOC visibility) + usage_section = self.render_usage_section(parser_info, id_prefix=id_prefix) + result.append(usage_section) + + # Add argument groups as sibling sections (for TOC visibility) + for group in parser_info.argument_groups: + group_section = self.render_group_section(group, id_prefix=id_prefix) + result.append(group_section) + + # Add subcommands + if parser_info.subcommands: + subcommands_node = self.render_subcommands(parser_info.subcommands) + result.append(subcommands_node) + + # Add epilog + if parser_info.epilog: + epilog_nodes = self._parse_text(parser_info.epilog) + result.extend(epilog_nodes) + + return self.post_process(result) + + def render_usage(self, parser_info: ParserInfo) -> argparse_usage: + """Render the usage block. + + Parameters + ---------- + parser_info : ParserInfo + The parser information. + + Returns + ------- + argparse_usage + Usage node. + """ + usage_node = argparse_usage() + usage_node["usage"] = parser_info.bare_usage + return usage_node + + def render_usage_section( + self, parser_info: ParserInfo, *, id_prefix: str = "" + ) -> nodes.section: + """Render usage as a section with heading for TOC visibility. + + Creates a proper section node with "Usage" heading containing the + usage block. This structure allows Sphinx's TocTreeCollector to + discover it for the table of contents. 
+ + Parameters + ---------- + parser_info : ParserInfo + The parser information. + id_prefix : str + Optional prefix for the section ID (e.g., "load" -> "load-usage"). + Used to ensure unique IDs when multiple argparse directives exist + on the same page. + + Returns + ------- + nodes.section + Section node containing the usage block with a "Usage" heading. + + Examples + -------- + >>> from sphinx_argparse_neo.parser import ParserInfo + >>> renderer = ArgparseRenderer() + >>> info = ParserInfo( + ... prog="myapp", + ... usage=None, + ... bare_usage="myapp [-h] command", + ... description=None, + ... epilog=None, + ... argument_groups=[], + ... subcommands=None, + ... subcommand_dest=None, + ... ) + >>> section = renderer.render_usage_section(info) + >>> section["ids"] + ['usage'] + + With prefix for subcommand pages: + + >>> section = renderer.render_usage_section(info, id_prefix="load") + >>> section["ids"] + ['load-usage'] + >>> section.children[0].astext() + 'Usage' + """ + section_id = f"{id_prefix}-usage" if id_prefix else "usage" + section = nodes.section() + section["ids"] = [section_id] + section["names"] = [nodes.fully_normalize_name("Usage")] + section += nodes.title("Usage", "Usage") + + usage_node = argparse_usage() + usage_node["usage"] = parser_info.bare_usage + section += usage_node + + return section + + def render_group_section( + self, group: ArgumentGroup, *, id_prefix: str = "" + ) -> nodes.section: + """Render an argument group wrapped in a section for TOC visibility. + + Creates a proper section node with the group title as heading, + containing the argparse_group node. This structure allows Sphinx's + TocTreeCollector to discover it for the table of contents. + + Parameters + ---------- + group : ArgumentGroup + The argument group to render. + id_prefix : str + Optional prefix for the section ID (e.g., "load" -> "load-options"). + Used to ensure unique IDs when multiple argparse directives exist + on the same page. 
+ + Returns + ------- + nodes.section + Section node containing the group for TOC discovery. + + Examples + -------- + >>> from sphinx_argparse_neo.parser import ArgumentGroup + >>> renderer = ArgparseRenderer() + >>> group = ArgumentGroup( + ... title="positional arguments", + ... description=None, + ... arguments=[], + ... mutually_exclusive=[], + ... ) + >>> section = renderer.render_group_section(group) + >>> section["ids"] + ['positional-arguments'] + + With prefix for subcommand pages: + + >>> section = renderer.render_group_section(group, id_prefix="load") + >>> section["ids"] + ['load-positional-arguments'] + >>> section.children[0].astext() + 'Positional Arguments' + """ + # Title case the group title for proper display + raw_title = group.title or "Arguments" + title = raw_title.title() # "positional arguments" -> "Positional Arguments" + + if self.config.group_title_prefix: + title = f"{self.config.group_title_prefix}{title}" + + # Generate section ID from title (with optional prefix for uniqueness) + base_id = title.lower().replace(" ", "-") + section_id = f"{id_prefix}-{base_id}" if id_prefix else base_id + + # Create section wrapper for TOC discovery + section = nodes.section() + section["ids"] = [section_id] + section["names"] = [nodes.fully_normalize_name(title)] + + # Add title for TOC - Sphinx's TocTreeCollector looks for this + section += nodes.title(title, title) + + # Create the styled group container (with empty title - section provides it) + # Pass id_prefix to render_group so arguments get unique IDs + group_node = self.render_group(group, include_title=False, id_prefix=id_prefix) + section += group_node + + return section + + def render_group( + self, + group: ArgumentGroup, + include_title: bool = True, + *, + id_prefix: str = "", + ) -> argparse_group: + """Render an argument group. + + Parameters + ---------- + group : ArgumentGroup + The argument group to render. + include_title : bool + Whether to include the title in the group node. 
When False, + the title is assumed to come from a parent section node. + Default is True for backwards compatibility. + id_prefix : str + Optional prefix for argument IDs (e.g., "shell" -> "shell-h"). + Used to ensure unique IDs when multiple argparse directives exist + on the same page. + + Returns + ------- + argparse_group + Group node containing argument nodes. + """ + group_node = argparse_group() + + if include_title: + title = group.title + if self.config.group_title_prefix: + title = f"{self.config.group_title_prefix}{title}" + group_node["title"] = title + else: + # Title provided by parent section + group_node["title"] = "" + + group_node["description"] = group.description + + # Add individual arguments + for arg in group.arguments: + arg_node = self.render_argument(arg, id_prefix=id_prefix) + group_node.append(arg_node) + + # Add mutually exclusive groups + for mutex in group.mutually_exclusive: + mutex_nodes = self.render_mutex_group(mutex, id_prefix=id_prefix) + group_node.extend(mutex_nodes) + + return group_node + + def render_argument( + self, arg: ArgumentInfo, *, id_prefix: str = "" + ) -> argparse_argument: + """Render a single argument. + + Parameters + ---------- + arg : ArgumentInfo + The argument to render. + id_prefix : str + Optional prefix for the argument ID (e.g., "shell" -> "shell-L"). + Used to ensure unique IDs when multiple argparse directives exist + on the same page. + + Returns + ------- + argparse_argument + Argument node. 
+ """ + arg_node = argparse_argument() + arg_node["names"] = arg.names + arg_node["help"] = arg.help + arg_node["metavar"] = arg.metavar + arg_node["required"] = arg.required + arg_node["id_prefix"] = id_prefix + + if self.config.show_defaults: + arg_node["default_string"] = arg.default_string + + if self.config.show_choices: + arg_node["choices"] = arg.choices + + if self.config.show_types: + arg_node["type_name"] = arg.type_name + + return arg_node + + def render_mutex_group( + self, mutex: MutuallyExclusiveGroup, *, id_prefix: str = "" + ) -> list[argparse_argument]: + """Render a mutually exclusive group. + + Parameters + ---------- + mutex : MutuallyExclusiveGroup + The mutually exclusive group. + id_prefix : str + Optional prefix for argument IDs (e.g., "shell" -> "shell-h"). + + Returns + ------- + list[argparse_argument] + List of argument nodes with mutex indicator. + """ + result: list[argparse_argument] = [] + for arg in mutex.arguments: + arg_node = self.render_argument(arg, id_prefix=id_prefix) + # Mark as part of mutex group + arg_node["mutex"] = True + arg_node["mutex_required"] = mutex.required + result.append(arg_node) + return result + + def render_subcommands( + self, subcommands: list[SubcommandInfo] + ) -> argparse_subcommands: + """Render subcommands section. + + Parameters + ---------- + subcommands : list[SubcommandInfo] + List of subcommand information. + + Returns + ------- + argparse_subcommands + Subcommands container node. + """ + container = argparse_subcommands() + container["title"] = "Sub-commands" + + for subcmd in subcommands: + subcmd_node = self.render_subcommand(subcmd) + container.append(subcmd_node) + + return container + + def render_subcommand(self, subcmd: SubcommandInfo) -> argparse_subcommand: + """Render a single subcommand. + + Parameters + ---------- + subcmd : SubcommandInfo + The subcommand information. + + Returns + ------- + argparse_subcommand + Subcommand node, potentially containing nested parser content. 
+ """ + subcmd_node = argparse_subcommand() + subcmd_node["name"] = subcmd.name + subcmd_node["aliases"] = subcmd.aliases + subcmd_node["help"] = subcmd.help + + # Recursively render the subcommand's parser + if subcmd.parser: + nested_nodes = self.render(subcmd.parser) + subcmd_node.extend(nested_nodes) + + return subcmd_node + + def post_process(self, result_nodes: list[nodes.Node]) -> list[nodes.Node]: + """Post-process the rendered nodes. + + Override this method to apply transformations after rendering. + + Parameters + ---------- + result_nodes : list[nodes.Node] + The rendered nodes. + + Returns + ------- + list[nodes.Node] + Post-processed nodes. + """ + return result_nodes + + def _parse_text(self, text: str) -> list[nodes.Node]: + """Parse text as RST or MyST content. + + Parameters + ---------- + text : str + Text to parse. + + Returns + ------- + list[nodes.Node] + Parsed docutils nodes. + """ + if not text: + return [] + + # Escape RST emphasis patterns before parsing (e.g., "django-*" -> "django-\*") + text = escape_rst_emphasis(text) + + if self.state is None: + # No state machine available, return as paragraph + para = nodes.paragraph(text=text) + return [para] + + # Use the state machine to parse RST + container = nodes.container() + self.state.nested_parse( + StringList(text.split("\n")), + 0, + container, + ) + return list(container.children) + + +def create_renderer( + config: RenderConfig | None = None, + state: RSTState | None = None, + renderer_class: type[ArgparseRenderer] | None = None, +) -> ArgparseRenderer: + """Create a renderer instance. + + Parameters + ---------- + config : RenderConfig | None + Rendering configuration. + state : RSTState | None + RST state for parsing. + renderer_class : type[ArgparseRenderer] | None + Custom renderer class to use. + + Returns + ------- + ArgparseRenderer + Configured renderer instance. 
+ """ + cls = renderer_class or ArgparseRenderer + return cls(config=config, state=state) diff --git a/docs/_ext/sphinx_argparse_neo/utils.py b/docs/_ext/sphinx_argparse_neo/utils.py new file mode 100644 index 0000000..c1a8275 --- /dev/null +++ b/docs/_ext/sphinx_argparse_neo/utils.py @@ -0,0 +1,78 @@ +"""Text processing utilities for sphinx_argparse_neo. + +This module provides utilities for cleaning argparse output before rendering: +- strip_ansi: Remove ANSI escape codes (for when FORCE_COLOR is set) +""" + +from __future__ import annotations + +import re + +# ANSI escape code pattern - matches CSI sequences like \033[32m, \033[1;34m, etc. +_ANSI_RE = re.compile(r"\033\[[;?0-9]*[a-zA-Z]") + + +def strip_ansi(text: str) -> str: + r"""Remove ANSI escape codes from text. + + When FORCE_COLOR is set in the environment, argparse may include ANSI + escape codes in its output. This function removes them so the output + renders correctly in Sphinx documentation. + + Parameters + ---------- + text : str + Text potentially containing ANSI codes. + + Returns + ------- + str + Text with ANSI codes removed. + + Examples + -------- + >>> strip_ansi("plain text") + 'plain text' + >>> strip_ansi("\033[32mgreen\033[0m") + 'green' + >>> strip_ansi("\033[1;34mbold blue\033[0m") + 'bold blue' + """ + return _ANSI_RE.sub("", text) + + +# RST emphasis pattern: matches -* that would trigger inline emphasis errors. +# Pattern matches: non-whitespace/non-backslash char, followed by -*, NOT followed by +# another * (which would be strong emphasis **). +_RST_EMPHASIS_RE = re.compile(r"(?<=[^\s\\])-\*(?!\*)") + + +def escape_rst_emphasis(text: str) -> str: + r"""Escape asterisks that would trigger RST inline emphasis. + + RST interprets ``*text*`` as emphasis. When argparse help text contains + glob patterns like ``django-*``, the ``-*`` sequence triggers RST + "Inline emphasis start-string without end-string" warnings. 
+ + This function escapes such asterisks to prevent RST parsing errors. + + Parameters + ---------- + text : str + Text potentially containing problematic asterisks. + + Returns + ------- + str + Text with asterisks escaped where needed. + + Examples + -------- + >>> escape_rst_emphasis('myapp load "my-*"') + 'myapp load "my-\\*"' + >>> escape_rst_emphasis("plain text") + 'plain text' + >>> escape_rst_emphasis("*emphasis* is ok") + '*emphasis* is ok' + """ + return _RST_EMPHASIS_RE.sub(r"-\*", text) diff --git a/docs/_static/css/argparse-highlight.css b/docs/_static/css/argparse-highlight.css new file mode 100644 index 0000000..f232c71 --- /dev/null +++ b/docs/_static/css/argparse-highlight.css @@ -0,0 +1,437 @@ +/* + * Argparse/CLI Highlighting Styles + * + * Styles for CLI inline roles and argparse help output highlighting. + * Uses "One Dark" inspired color palette (Python 3.14 argparse style). + * + * Color Palette: + * Background: #282C34 + * Default text: #CCCED4 + * Usage label: #61AFEF (blue, bold) + * Program name: #C678DD (purple, bold) + * Subcommands: #98C379 (green) + * Options: #56B6C2 (teal) + * Metavars: #E5C07B (yellow, italic) + * Choices: #98C379 (green) + * Headings: #E5E5E5 (bright, bold) + * Punctuation: #CCCED4 + */ + +/* ========================================================================== + Inline Role Styles + ========================================================================== */ + +/* + * Shared monospace font and code font-size for all CLI inline roles + */ +.cli-option, +.cli-metavar, +.cli-command, +.cli-default, +.cli-choice { + font-family: var(--font-stack--monospace); + font-size: var(--code-font-size); +} + +/* + * CLI Options + * + * Long options (--verbose) and short options (-h) both use teal color. + */ +.cli-option-long, +.cli-option-short { + color: #56b6c2; +} + +/* + * CLI Metavars + * + * Placeholder values like FILE, PATH, DIRECTORY. + * Yellow/amber to indicate "replace me" - distinct from flags (teal). 
+ */ +.cli-metavar { + color: #e5c07b; + font-style: italic; +} + +/* + * CLI Commands and Choices + * + * Both use green to indicate valid enumerated values. + * Commands: subcommand names like sync, add, list + * Choices: enumeration values like json, yaml, table + */ +.cli-command, +.cli-choice { + color: #98c379; +} + +.cli-command { + font-weight: bold; +} + +/* + * CLI Default Values + * + * Default values shown in help text like None, "auto". + * Subtle styling to not distract from options. + */ +.cli-default { + color: #ccced4; + font-style: italic; +} + +/* ========================================================================== + Argparse Code Block Highlighting + ========================================================================== */ + +/* + * These styles apply within Pygments-highlighted code blocks using the + * argparse, argparse-usage, or argparse-help lexers. + */ + +/* Usage heading "usage:" - bold blue */ +.highlight-argparse .gh, +.highlight-argparse-usage .gh, +.highlight-argparse-help .gh { + color: #61afef; + font-weight: bold; +} + +/* Section headers like "positional arguments:", "options:" - neutral bright */ +.highlight-argparse .gs, +.highlight-argparse-help .gs { + color: #e5e5e5; + font-weight: bold; +} + +/* Long options --foo - teal */ +.highlight-argparse .nt, +.highlight-argparse-usage .nt, +.highlight-argparse-help .nt { + color: #56b6c2; + font-weight: normal; +} + +/* Short options -h - teal (same as long) */ +.highlight-argparse .na, +.highlight-argparse-usage .na, +.highlight-argparse-help .na { + color: #56b6c2; + font-weight: normal; +} + +/* Metavar placeholders FILE, PATH - yellow/amber italic */ +.highlight-argparse .nv, +.highlight-argparse-usage .nv, +.highlight-argparse-help .nv { + color: #e5c07b; + font-style: italic; +} + +/* Command/program names - purple bold */ +.highlight-argparse .nl, +.highlight-argparse-usage .nl, +.highlight-argparse-help .nl { + color: #c678dd; + font-weight: bold; +} + +/* 
Subcommands - bold green */ +.highlight-argparse .nf, +.highlight-argparse-usage .nf, +.highlight-argparse-help .nf { + color: #98c379; + font-weight: bold; +} + +/* Choice values - green */ +.highlight-argparse .no, +.highlight-argparse-usage .no, +.highlight-argparse-help .no, +.highlight-argparse .nc, +.highlight-argparse-usage .nc, +.highlight-argparse-help .nc { + color: #98c379; +} + +/* Punctuation [], {}, () - neutral gray */ +.highlight-argparse .p, +.highlight-argparse-usage .p, +.highlight-argparse-help .p { + color: #ccced4; +} + +/* Operators like | - neutral gray */ +.highlight-argparse .o, +.highlight-argparse-usage .o, +.highlight-argparse-help .o { + color: #ccced4; + font-weight: normal; +} + +/* ========================================================================== + Argparse Directive Highlighting (.. argparse:: output) + ========================================================================== */ + +/* + * These styles apply to the argparse directive output which uses custom + * nodes rendered by sphinx_argparse_neo. The directive adds highlight spans + * directly to the HTML output. + */ + +/* + * Usage Block (.argparse-usage) + * + * The usage block now has both .argparse-usage and .highlight-argparse-usage + * classes. The .highlight-argparse-usage selectors above already handle the + * token highlighting. These selectors provide fallback and ensure consistent + * styling. 
+ */ + +/* Usage block container - match Pygments monokai background and code block styling */ +pre.argparse-usage { + background: var(--argparse-code-background); + font-size: var(--code-font-size); + padding: 0.625rem 0.875rem; + line-height: 1.5; + border-radius: 0.2rem; + scrollbar-color: var(--color-foreground-border) transparent; + scrollbar-width: thin; +} + +.argparse-usage .gh { + color: #61afef; + font-weight: bold; +} + +.argparse-usage .nt { + color: #56b6c2; + font-weight: normal; +} + +.argparse-usage .na { + color: #56b6c2; + font-weight: normal; +} + +.argparse-usage .nv { + color: #e5c07b; + font-style: italic; +} + +.argparse-usage .nl { + color: #c678dd; + font-weight: bold; +} + +.argparse-usage .nf { + color: #98c379; + font-weight: bold; +} + +.argparse-usage .no, +.argparse-usage .nc { + color: #98c379; +} + +.argparse-usage .o { + color: #ccced4; + font-weight: normal; +} + +.argparse-usage .p { + color: #ccced4; +} + +/* + * Argument Name (
) + * + * The argument names in the dl/dt structure are highlighted with + * semantic spans for options and metavars. + */ +.argparse-argument-name .nt { + color: #56b6c2; + font-weight: normal; +} + +.argparse-argument-name .na { + color: #56b6c2; + font-weight: normal; +} + +.argparse-argument-name .nv { + color: #e5c07b; + font-style: italic; +} + +.argparse-argument-name .nl { + color: #c678dd; + font-weight: bold; +} + +/* ========================================================================== + Argument Wrapper and Linking Styles + ========================================================================== */ + +/* + * Argparse-specific code background (monokai #272822) + * Uses a custom variable to avoid affecting Furo's --color-inline-code-background. + */ +:root { + --argparse-code-background: #272822; +} + +/* + * Background styling for argument names - subtle background like code.literal + * This provides visual weight and hierarchy for argument definitions. + */ +.argparse-argument-name { + background: var(--argparse-code-background); + border-radius: 0.2rem; + padding: 0.485rem 0.875rem; + font-family: var(--font-stack--monospace); + font-size: var(--code-font-size); + width: fit-content; + position: relative; +} + +/* + * Wrapper for linking - enables scroll-margin for fixed header navigation + * and :target pseudo-class for highlighting when linked. + */ +.argparse-argument-wrapper { + scroll-margin-top: 2.5rem; +} + +/* + * Headerlink anchor (ΒΆ) - hidden until hover + * Positioned outside the dt element to the right. + * Follows Sphinx documentation convention for linkable headings. 
+ */ +.argparse-argument-name .headerlink { + visibility: hidden; + position: absolute; + right: -1.5rem; + top: 50%; + transform: translateY(-50%); + color: var(--color-foreground-secondary); + text-decoration: none; +} + +/* + * Show headerlink on hover or when targeted via URL fragment + */ +.argparse-argument-wrapper:hover .headerlink, +.argparse-argument-wrapper:target .headerlink { + visibility: visible; +} + +.argparse-argument-name .headerlink:hover:not(:visited) { + color: var(--color-foreground-primary); +} + +/* + * Light mode headerlink color overrides + * Needed because code block has dark background regardless of theme + */ +body[data-theme="light"] .argparse-argument-name .headerlink { + color: #9ca0a5; + + &:hover:not(:visited) { + color: #cfd0d0; + } +} + +@media (prefers-color-scheme: light) { + body:not([data-theme="dark"]) .argparse-argument-name .headerlink { + color: #9ca0a5; + + &:hover:not(:visited) { + color: #cfd0d0; + } + } +} + +/* + * Highlight when targeted via URL fragment + * Uses Furo's highlight-on-target color for consistency. + */ +.argparse-argument-wrapper:target .argparse-argument-name { + background-color: var(--color-highlight-on-target); +} + +/* + * Argument metadata definition list + * + * Renders metadata (Default, Type, Choices, Required) as a horizontal + * flexbox of key-value pairs and standalone tags. 
+ */ +.argparse-argument-meta { + margin: 0.5rem 0 0 0; + padding: 0; + display: flex; + flex-wrap: wrap; + gap: 0.5rem 1rem; + align-items: center; +} + +.argparse-meta-item { + display: flex; + align-items: center; + gap: 0.25rem; +} + +.argparse-meta-key { + color: var(--color-foreground-secondary, #6c757d); + font-size: var(--code-font-size); +} + +.argparse-meta-key::after { + content: ":"; +} + +.argparse-meta-value .nv { + background: var(--argparse-code-background); + border-radius: 0.2rem; + padding: 0.1rem 0.3rem; + font-family: var(--font-stack--monospace); + font-size: var(--code-font-size); + color: #e5c07b; +} + +/* + * Meta tag (e.g., "Required") - follows Furo's guilabel pattern + * Uses semi-transparent amber background with border for visibility + * without the harshness of solid fills. Amber conveys "needs attention". + */ +.argparse-meta-tag { + background-color: #fef3c780; + border: 1px solid #fcd34d80; + color: var(--color-foreground-primary); + font-size: var(--code-font-size); + padding: 0.1rem 0.4rem; + border-radius: 0.2rem; + font-weight: 500; +} + +/* Dark mode: darker amber with adjusted border */ +body[data-theme="dark"] .argparse-meta-tag { + background-color: #78350f60; + border-color: #b4530980; +} + +@media (prefers-color-scheme: dark) { + body:not([data-theme="light"]) .argparse-meta-tag { + background-color: #78350f60; + border-color: #b4530980; + } +} + +/* + * Help text description + * Adds spacing above for visual separation from argument name. + */ +.argparse-argument-help { + padding-block-start: 0.5rem; +} diff --git a/docs/cli/index.md b/docs/cli/index.md new file mode 100644 index 0000000..7348c42 --- /dev/null +++ b/docs/cli/index.md @@ -0,0 +1,57 @@ +(cli)= + +# CLI + +g is a minimal CLI wrapper that proxies to your current directory's VCS command. + +## How it works + +When you run `g`, it: + +1. Walks up from your current directory looking for `.git`, `.svn`, or `.hg` +2. 
Invokes the corresponding VCS (`git`, `svn`, or `hg`) with your arguments +3. Exits after the command completes + +**Note:** `--version`/`-V` is handled by g itself rather than passed to the VCS. + +## Usage + +```console +$ g status +``` + +Is equivalent to: + +```console +$ git status # if in a git repo +$ svn status # if in an svn repo +$ hg status # if in an hg repo +``` + +(cli-main)= + +## Command + +```{eval-rst} +.. argparse:: + :module: g + :func: create_parser + :prog: g +``` + +## Examples + +```console +$ g status +$ g commit -m "Fix bug" +$ g log --oneline -10 +$ g diff HEAD~1 +``` + +## Supported VCS + +| Directory marker | VCS command | +|------------------|-------------| +| `.git` | `git` | +| `.svn` | `svn` | +| `.hg` | `hg` | diff --git a/docs/conf.py b/docs/conf.py index 470ef7e..28743f4 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -41,6 +41,7 @@ "sphinxext.rediraffe", "myst_parser", "linkify_issues", + "argparse_exemplar", ] myst_enable_extensions = [ "colon_fence", @@ -69,7 +70,7 @@ html_favicon = "_static/favicon.ico" html_static_path = ["_static"] -html_css_files = ["css/custom.css"] +html_css_files = ["css/custom.css", "css/argparse-highlight.css"] html_extra_path = ["manifest.json"] html_theme = "furo" html_theme_path: list[str] = [] diff --git a/docs/index.md b/docs/index.md index 9229c83..7ab0dcb 100644 --- a/docs/index.md +++ b/docs/index.md @@ -8,6 +8,7 @@ :hidden: quickstart +cli/index ``` ```{toctree} diff --git a/pyproject.toml b/pyproject.toml index 330594b..2b61669 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -84,6 +84,9 @@ dev = [ # Lint "ruff", "mypy", + # Type stubs + "types-docutils", + "types-Pygments", ] docs = [ @@ -125,10 +128,25 @@ build-backend = "hatchling.build" [tool.mypy] strict = true python_version = "3.10" +mypy_path = ["src", "docs/_ext"] +explicit_package_bases = true files = [ "src/", "tests/", + "docs/_ext/", ] +exclude = [ + "tests/docs/", + "docs/_ext/conftest\\.py$", +] + +[[tool.mypy.overrides]] 
+module = [ + "cli_usage_lexer", + "argparse_lexer", + "argparse_roles", +] +ignore_missing_imports = true [tool.ruff] target-version = "py310" diff --git a/src/g/__init__.py b/src/g/__init__.py index c9dcfe0..daea825 100755 --- a/src/g/__init__.py +++ b/src/g/__init__.py @@ -3,7 +3,7 @@ from __future__ import annotations -import io +import argparse import logging import os import pathlib @@ -12,7 +12,9 @@ import typing as t from os import PathLike -__all__ = ["DEFAULT", "run", "sys", "vcspath_registry"] +from g.__about__ import __version__ + +__all__ = ["DEFAULT", "__version__", "create_parser", "run", "sys", "vcspath_registry"] vcspath_registry = {".git": "git", ".svn": "svn", ".hg": "hg"} @@ -28,6 +30,50 @@ def find_repo_type(path: pathlib.Path | str) -> str | None: return None +def create_parser() -> argparse.ArgumentParser: + """Create argument parser for g CLI. + + Returns + ------- + argparse.ArgumentParser + Configured argument parser for the g command. + + Examples + -------- + >>> parser = create_parser() + >>> parser.prog + 'g' + + >>> args = parser.parse_args(['status']) + >>> args.vcs_args + ['status'] + + >>> args = parser.parse_args(['commit', '-m', 'message']) + >>> args.vcs_args + ['commit', '-m', 'message'] + """ + parser = argparse.ArgumentParser( + prog="g", + description="CLI alias for your current directory's VCS command " + "(git, svn, hg).", + epilog="Arguments are passed to the detected VCS (except --version/-V).", + formatter_class=argparse.RawDescriptionHelpFormatter, + ) + parser.add_argument( + "--version", + "-V", + action="version", + version=f"%(prog)s {__version__}", + ) + parser.add_argument( + "vcs_args", + nargs=argparse.REMAINDER, + metavar="...", + help="Arguments passed to the detected VCS (git, svn, or hg)", + ) + return parser + + DEFAULT = object() @@ -47,11 +93,20 @@ def run( returned, it would print ** after command. 
""" # Interpret default kwargs lazily for mockability of argv - if cmd is DEFAULT: - cmd = find_repo_type(pathlib.Path.cwd()) if cmd_args is DEFAULT: cmd_args = sys.argv[1:] + # Handle --version/-V before VCS detection + assert isinstance(cmd_args, (tuple, list)) + if cmd_args and cmd_args[0] in ("--version", "-V"): + print(f"g {__version__}") + if os.getenv("G_IS_TEST") and __name__ != "__main__": + return None + sys.exit(0) + + if cmd is DEFAULT: + cmd = find_repo_type(pathlib.Path.cwd()) + logging.basicConfig(level=logging.INFO, format="%(message)s") if cmd is None: @@ -59,7 +114,6 @@ def run( log.info(msg) return None - assert isinstance(cmd_args, (tuple, list)) assert isinstance(cmd, (str, bytes, pathlib.Path)) proc = subprocess.Popen([cmd, *cmd_args], **kwargs) diff --git a/tests/docs/__init__.py b/tests/docs/__init__.py new file mode 100644 index 0000000..88826fc --- /dev/null +++ b/tests/docs/__init__.py @@ -0,0 +1,3 @@ +"""tests.docs package.""" + +from __future__ import annotations diff --git a/tests/docs/_ext/__init__.py b/tests/docs/_ext/__init__.py new file mode 100644 index 0000000..daac77f --- /dev/null +++ b/tests/docs/_ext/__init__.py @@ -0,0 +1,3 @@ +"""tests.docs._ext package.""" + +from __future__ import annotations diff --git a/tests/docs/_ext/conftest.py b/tests/docs/_ext/conftest.py new file mode 100644 index 0000000..fa2919b --- /dev/null +++ b/tests/docs/_ext/conftest.py @@ -0,0 +1,11 @@ +"""Fixtures and configuration for docs extension tests.""" + +from __future__ import annotations + +import pathlib +import sys + +# Add docs/_ext to path so we can import the extension module +docs_ext_path = pathlib.Path(__file__).parent.parent.parent.parent / "docs" / "_ext" +if str(docs_ext_path) not in sys.path: + sys.path.insert(0, str(docs_ext_path)) diff --git a/tests/docs/_ext/sphinx_argparse_neo/__init__.py b/tests/docs/_ext/sphinx_argparse_neo/__init__.py new file mode 100644 index 0000000..259f37c --- /dev/null +++ 
b/tests/docs/_ext/sphinx_argparse_neo/__init__.py @@ -0,0 +1,3 @@ +"""Tests for sphinx_argparse_neo extension.""" + +from __future__ import annotations diff --git a/tests/docs/_ext/sphinx_argparse_neo/conftest.py b/tests/docs/_ext/sphinx_argparse_neo/conftest.py new file mode 100644 index 0000000..c805df1 --- /dev/null +++ b/tests/docs/_ext/sphinx_argparse_neo/conftest.py @@ -0,0 +1,237 @@ +"""Fixtures and configuration for sphinx_argparse_neo tests.""" + +from __future__ import annotations + +import argparse +import pathlib +import sys + +import pytest + +# Add docs/_ext to path so we can import the extension module +docs_ext_path = ( + pathlib.Path(__file__).parent.parent.parent.parent.parent / "docs" / "_ext" +) +if str(docs_ext_path) not in sys.path: + sys.path.insert(0, str(docs_ext_path)) + + +@pytest.fixture +def simple_parser() -> argparse.ArgumentParser: + """Create a simple parser with basic arguments. + + Returns + ------- + argparse.ArgumentParser + Parser with a positional argument and a couple of options. + """ + parser = argparse.ArgumentParser( + prog="myapp", + description="A simple test application", + ) + parser.add_argument("filename", help="Input file to process") + parser.add_argument( + "-v", "--verbose", action="store_true", help="Enable verbose mode" + ) + parser.add_argument("-o", "--output", metavar="FILE", help="Output file") + return parser + + +@pytest.fixture +def parser_with_groups() -> argparse.ArgumentParser: + """Create a parser with custom argument groups. + + Returns + ------- + argparse.ArgumentParser + Parser with multiple argument groups. 
+ """ + parser = argparse.ArgumentParser(prog="grouped", description="Parser with groups") + + input_group = parser.add_argument_group("Input Options", "Options for input") + input_group.add_argument("--input", "-i", required=True, help="Input file") + input_group.add_argument("--format", choices=["json", "yaml"], help="Input format") + + output_group = parser.add_argument_group("Output Options", "Options for output") + output_group.add_argument("--output", "-o", help="Output file") + output_group.add_argument("--pretty", action="store_true", help="Pretty print") + + return parser + + +@pytest.fixture +def parser_with_subcommands() -> argparse.ArgumentParser: + """Create a parser with subcommands. + + Returns + ------- + argparse.ArgumentParser + Parser with subparsers. + """ + parser = argparse.ArgumentParser(prog="cli", description="CLI with subcommands") + parser.add_argument("-v", "--verbose", action="store_true", help="Verbose mode") + + subparsers = parser.add_subparsers(dest="command", help="Available commands") + + # Sync subcommand + sync_parser = subparsers.add_parser("sync", help="Synchronize repositories") + sync_parser.add_argument("repo", nargs="?", help="Repository to sync") + sync_parser.add_argument("-f", "--force", action="store_true", help="Force sync") + + # Add subcommand + add_parser = subparsers.add_parser("add", aliases=["a"], help="Add a repository") + add_parser.add_argument("url", help="Repository URL") + add_parser.add_argument("-n", "--name", help="Repository name") + + return parser + + +@pytest.fixture +def parser_with_mutex() -> argparse.ArgumentParser: + """Create a parser with mutually exclusive arguments. + + Returns + ------- + argparse.ArgumentParser + Parser with mutually exclusive group. 
+ """ + parser = argparse.ArgumentParser(prog="mutex", description="Parser with mutex") + + mutex = parser.add_mutually_exclusive_group(required=True) + mutex.add_argument("-v", "--verbose", action="store_true", help="Verbose output") + mutex.add_argument("-q", "--quiet", action="store_true", help="Quiet output") + + return parser + + +@pytest.fixture +def parser_with_all_actions() -> argparse.ArgumentParser: + """Create a parser with all action types. + + Returns + ------- + argparse.ArgumentParser + Parser demonstrating all action types. + """ + parser = argparse.ArgumentParser(prog="actions", description="All action types") + + # store (default) + parser.add_argument("--name", help="Store action") + + # store_const + parser.add_argument( + "--enable", action="store_const", const="enabled", help="Store const" + ) + + # store_true / store_false + parser.add_argument("--flag", action="store_true", help="Store true") + parser.add_argument("--no-flag", action="store_false", help="Store false") + + # append + parser.add_argument("--item", action="append", help="Append action") + + # append_const + parser.add_argument( + "--debug", + action="append_const", + const="debug", + dest="features", + help="Append const", + ) + + # count + parser.add_argument("-v", "--verbose", action="count", default=0, help="Count") + + # BooleanOptionalAction (Python 3.9+) + parser.add_argument( + "--option", action=argparse.BooleanOptionalAction, help="Boolean optional" + ) + + return parser + + +@pytest.fixture +def parser_with_types() -> argparse.ArgumentParser: + """Create a parser with typed arguments. + + Returns + ------- + argparse.ArgumentParser + Parser with various type specifications. 
+ """ + parser = argparse.ArgumentParser(prog="types", description="Typed arguments") + + parser.add_argument("--count", type=int, help="Integer argument") + parser.add_argument("--ratio", type=float, help="Float argument") + parser.add_argument("--path", type=pathlib.Path, help="Path argument") + parser.add_argument("--choice", type=str, choices=["a", "b", "c"], help="Choices") + + return parser + + +@pytest.fixture +def parser_with_nargs() -> argparse.ArgumentParser: + """Create a parser demonstrating nargs variants. + + Returns + ------- + argparse.ArgumentParser + Parser with various nargs specifications. + """ + parser = argparse.ArgumentParser(prog="nargs", description="Nargs variants") + + parser.add_argument("single", help="Single positional") + parser.add_argument("optional", nargs="?", default="default", help="Optional") + parser.add_argument("zero_or_more", nargs="*", help="Zero or more") + parser.add_argument("--one-or-more", nargs="+", help="One or more") + parser.add_argument("--exactly-two", nargs=2, metavar=("A", "B"), help="Exactly 2") + parser.add_argument("remainder", nargs=argparse.REMAINDER, help="Remainder") + + return parser + + +@pytest.fixture +def parser_with_defaults() -> argparse.ArgumentParser: + """Create a parser with various default values. + + Returns + ------- + argparse.ArgumentParser + Parser demonstrating default handling. + """ + parser = argparse.ArgumentParser(prog="defaults") + + parser.add_argument("--none-default", default=None, help="None default") + parser.add_argument("--string-default", default="hello", help="String default") + parser.add_argument("--int-default", default=42, type=int, help="Int default") + parser.add_argument("--list-default", default=[1, 2], help="List default") + parser.add_argument("--suppress", default=argparse.SUPPRESS, help=argparse.SUPPRESS) + + return parser + + +@pytest.fixture +def nested_subcommands_parser() -> argparse.ArgumentParser: + """Create a parser with nested subcommands. 
+ + Returns + ------- + argparse.ArgumentParser + Parser with multiple levels of subparsers. + """ + parser = argparse.ArgumentParser(prog="nested", description="Nested subcommands") + + level1 = parser.add_subparsers(dest="level1") + + # First level: repo + repo = level1.add_parser("repo", help="Repository commands") + repo_subs = repo.add_subparsers(dest="level2") + + # Second level: repo clone + clone = repo_subs.add_parser("clone", help="Clone a repository") + clone.add_argument("url", help="Repository URL") + + # Second level: repo list + repo_subs.add_parser("list", help="List repositories") + + return parser diff --git a/tests/docs/_ext/sphinx_argparse_neo/test_compat.py b/tests/docs/_ext/sphinx_argparse_neo/test_compat.py new file mode 100644 index 0000000..417e29a --- /dev/null +++ b/tests/docs/_ext/sphinx_argparse_neo/test_compat.py @@ -0,0 +1,330 @@ +"""Tests for sphinx_argparse_neo.compat module.""" + +from __future__ import annotations + +import sys +import typing as t + +import pytest +from sphinx_argparse_neo.compat import ( + MockModule, + get_parser_from_entry_point, + get_parser_from_module, + import_module, + mock_imports, +) + +# --- MockModule tests --- + + +def test_mock_module_name() -> None: + """Test MockModule name attribute.""" + mock = MockModule("mypackage.submodule") + assert mock.__name__ == "mypackage.submodule" + + +def test_mock_module_repr() -> None: + """Test MockModule string representation.""" + mock = MockModule("mypackage") + assert repr(mock) == "" + + +def test_mock_module_getattr() -> None: + """Test MockModule attribute access.""" + mock = MockModule("mypackage") + child = mock.submodule + + assert isinstance(child, MockModule) + assert child.__name__ == "mypackage.submodule" + + +def test_mock_module_nested_getattr() -> None: + """Test MockModule nested attribute access.""" + mock = MockModule("pkg") + deep = mock.level1.level2.level3 + + assert deep.__name__ == "pkg.level1.level2.level3" + + +def 
test_mock_module_callable() -> None: + """Test MockModule is callable.""" + mock = MockModule("mypackage") + result = mock() + + assert result is mock + + +def test_mock_module_callable_with_args() -> None: + """Test MockModule callable with arguments.""" + mock = MockModule("mypackage") + result = mock(1, 2, 3, key="value") + + assert result is mock + + +def test_mock_module_chained_call() -> None: + """Test MockModule chained attribute access and call.""" + mock = MockModule("pkg") + result = mock.SomeClass() + + assert isinstance(result, MockModule) + + +# --- mock_imports context manager tests --- + + +def test_mock_imports_adds_to_sys_modules() -> None: + """Test that mock_imports adds modules to sys.modules.""" + module_name = "test_fake_module_xyz" + + assert module_name not in sys.modules + + with mock_imports([module_name]): + assert module_name in sys.modules + assert isinstance(sys.modules[module_name], MockModule) + + assert module_name not in sys.modules + + +def test_mock_imports_multiple_modules() -> None: + """Test mocking multiple modules.""" + modules = ["fake_a", "fake_b", "fake_c"] + + with mock_imports(modules): + for name in modules: + assert name in sys.modules + + for name in modules: + assert name not in sys.modules + + +def test_mock_imports_nested_modules() -> None: + """Test mocking nested module paths.""" + modules = ["fake_pkg", "fake_pkg.sub", "fake_pkg.sub.deep"] + + with mock_imports(modules): + for name in modules: + assert name in sys.modules + + for name in modules: + assert name not in sys.modules + + +def test_mock_imports_does_not_override_existing() -> None: + """Test that mock_imports doesn't override existing modules.""" + # argparse is already imported + original = sys.modules["argparse"] + + with mock_imports(["argparse"]): + # Should not be replaced + assert sys.modules["argparse"] is original + + assert sys.modules["argparse"] is original + + +def test_mock_imports_cleanup_on_exception() -> None: + """Test that 
mock_imports cleans up even on exception.""" + module_name = "fake_exception_test" + exc_msg = "Test exception" + + with pytest.raises(ValueError), mock_imports([module_name]): + assert module_name in sys.modules + raise ValueError(exc_msg) + + assert module_name not in sys.modules + + +def test_mock_imports_allows_import() -> None: + """Test that mocked modules can be imported.""" + module_name = "fake_importable" + + with mock_imports([module_name]): + # This should work without ImportError + import fake_importable # type: ignore[import-not-found] + + assert fake_importable.__name__ == "fake_importable" + + +# --- import_module tests --- + + +def test_import_module_builtin() -> None: + """Test importing a built-in module.""" + mod = import_module("argparse") + assert hasattr(mod, "ArgumentParser") + + +def test_import_module_stdlib() -> None: + """Test importing a stdlib module.""" + mod = import_module("os.path") + assert hasattr(mod, "join") + + +def test_import_module_not_found() -> None: + """Test importing a non-existent module.""" + with pytest.raises(ModuleNotFoundError): + import_module("nonexistent_module_xyz") + + +# --- get_parser_from_module tests --- + + +def test_get_parser_from_module_argparse() -> None: + """Test getting parser from argparse module itself.""" + # Create a test module with a parser factory + import types + + test_module = types.ModuleType("test_parser_module") + + def create_parser() -> t.Any: + import argparse + + return argparse.ArgumentParser(prog="test") + + test_module.create_parser = create_parser # type: ignore[attr-defined] + sys.modules["test_parser_module"] = test_module + + try: + parser = get_parser_from_module("test_parser_module", "create_parser") + assert parser.prog == "test" + finally: + del sys.modules["test_parser_module"] + + +def test_get_parser_from_module_with_mock() -> None: + """Test getting parser with mocked dependencies.""" + import types + + test_module = types.ModuleType("test_mock_parser") + + def 
create_parser() -> t.Any: + import argparse + + return argparse.ArgumentParser(prog="mocked") + + test_module.create_parser = create_parser # type: ignore[attr-defined] + sys.modules["test_mock_parser"] = test_module + + try: + parser = get_parser_from_module( + "test_mock_parser", + "create_parser", + mock_modules=["fake_dependency"], + ) + assert parser.prog == "mocked" + finally: + del sys.modules["test_mock_parser"] + + +def test_get_parser_from_module_dotted_path() -> None: + """Test getting parser from class method.""" + import types + + test_module = types.ModuleType("test_class_parser") + + class CLI: + @staticmethod + def create_parser() -> t.Any: + import argparse + + return argparse.ArgumentParser(prog="from_class") + + test_module.CLI = CLI # type: ignore[attr-defined] + sys.modules["test_class_parser"] = test_module + + try: + parser = get_parser_from_module("test_class_parser", "CLI.create_parser") + assert parser.prog == "from_class" + finally: + del sys.modules["test_class_parser"] + + +def test_get_parser_from_module_not_found() -> None: + """Test error when module not found.""" + with pytest.raises(ModuleNotFoundError): + get_parser_from_module("nonexistent_xyz", "func") + + +def test_get_parser_from_module_func_not_found() -> None: + """Test error when function not found.""" + with pytest.raises(AttributeError): + get_parser_from_module("argparse", "nonexistent_func") + + +# --- get_parser_from_entry_point tests --- + + +def test_get_parser_from_entry_point_valid() -> None: + """Test parsing valid entry point format.""" + import types + + test_module = types.ModuleType("test_entry_point") + + def get_parser() -> t.Any: + import argparse + + return argparse.ArgumentParser(prog="entry") + + test_module.get_parser = get_parser # type: ignore[attr-defined] + sys.modules["test_entry_point"] = test_module + + try: + parser = get_parser_from_entry_point("test_entry_point:get_parser") + assert parser.prog == "entry" + finally: + del 
sys.modules["test_entry_point"] + + +def test_get_parser_from_entry_point_invalid_format() -> None: + """Test error on invalid entry point format.""" + with pytest.raises(ValueError) as exc_info: + get_parser_from_entry_point("no_colon_separator") + + assert "Invalid entry point format" in str(exc_info.value) + + +def test_get_parser_from_entry_point_with_class() -> None: + """Test entry point with class method.""" + import types + + test_module = types.ModuleType("test_entry_class") + + class Factory: + @staticmethod + def parser() -> t.Any: + import argparse + + return argparse.ArgumentParser(prog="factory") + + test_module.Factory = Factory # type: ignore[attr-defined] + sys.modules["test_entry_class"] = test_module + + try: + parser = get_parser_from_entry_point("test_entry_class:Factory.parser") + assert parser.prog == "factory" + finally: + del sys.modules["test_entry_class"] + + +def test_get_parser_from_entry_point_with_mock() -> None: + """Test entry point with mocked modules.""" + import types + + test_module = types.ModuleType("test_entry_mock") + + def make_parser() -> t.Any: + import argparse + + return argparse.ArgumentParser(prog="with_mock") + + test_module.make_parser = make_parser # type: ignore[attr-defined] + sys.modules["test_entry_mock"] = test_module + + try: + parser = get_parser_from_entry_point( + "test_entry_mock:make_parser", + mock_modules=["some_optional_dep"], + ) + assert parser.prog == "with_mock" + finally: + del sys.modules["test_entry_mock"] diff --git a/tests/docs/_ext/sphinx_argparse_neo/test_nodes.py b/tests/docs/_ext/sphinx_argparse_neo/test_nodes.py new file mode 100644 index 0000000..1c57631 --- /dev/null +++ b/tests/docs/_ext/sphinx_argparse_neo/test_nodes.py @@ -0,0 +1,528 @@ +"""Tests for sphinx_argparse_neo.nodes module.""" + +from __future__ import annotations + +import re +import typing as t + +import pytest +from docutils import nodes +from sphinx_argparse_neo.nodes import ( + _generate_argument_id, + 
argparse_argument, + argparse_group, + argparse_program, + argparse_subcommand, + argparse_subcommands, + argparse_usage, +) + +# --- Node creation tests --- + + +def test_argparse_program_creation() -> None: + """Test creating argparse_program node.""" + node = argparse_program() + node["prog"] = "myapp" + + assert node["prog"] == "myapp" + assert isinstance(node, nodes.General) + assert isinstance(node, nodes.Element) + + +def test_argparse_usage_creation() -> None: + """Test creating argparse_usage node.""" + node = argparse_usage() + node["usage"] = "myapp [-h] [--verbose] command" + + assert node["usage"] == "myapp [-h] [--verbose] command" + + +def test_argparse_group_creation() -> None: + """Test creating argparse_group node.""" + node = argparse_group() + node["title"] = "Output Options" + node["description"] = "Control output format" + + assert node["title"] == "Output Options" + assert node["description"] == "Control output format" + + +def test_argparse_argument_creation() -> None: + """Test creating argparse_argument node.""" + node = argparse_argument() + node["names"] = ["-v", "--verbose"] + node["help"] = "Enable verbose mode" + node["metavar"] = None + node["required"] = False + + assert node["names"] == ["-v", "--verbose"] + assert node["help"] == "Enable verbose mode" + + +def test_argparse_subcommands_creation() -> None: + """Test creating argparse_subcommands node.""" + node = argparse_subcommands() + node["title"] = "Commands" + + assert node["title"] == "Commands" + + +def test_argparse_subcommand_creation() -> None: + """Test creating argparse_subcommand node.""" + node = argparse_subcommand() + node["name"] = "sync" + node["aliases"] = ["s"] + node["help"] = "Synchronize repositories" + + assert node["name"] == "sync" + assert node["aliases"] == ["s"] + + +# --- Node nesting tests --- + + +def test_program_can_contain_usage() -> None: + """Test that program node can contain usage node.""" + program = argparse_program() + program["prog"] = 
"myapp" + + usage = argparse_usage() + usage["usage"] = "myapp [-h]" + + program.append(usage) + + assert len(program.children) == 1 + assert isinstance(program.children[0], argparse_usage) + + +def test_program_can_contain_groups() -> None: + """Test that program node can contain group nodes.""" + program = argparse_program() + + group1 = argparse_group() + group1["title"] = "Positional Arguments" + + group2 = argparse_group() + group2["title"] = "Optional Arguments" + + program.append(group1) + program.append(group2) + + assert len(program.children) == 2 + + +def test_group_can_contain_arguments() -> None: + """Test that group node can contain argument nodes.""" + group = argparse_group() + group["title"] = "Options" + + arg1 = argparse_argument() + arg1["names"] = ["-v"] + + arg2 = argparse_argument() + arg2["names"] = ["-o"] + + group.append(arg1) + group.append(arg2) + + assert len(group.children) == 2 + + +def test_subcommands_can_contain_subcommand() -> None: + """Test that subcommands container can contain subcommand nodes.""" + container = argparse_subcommands() + container["title"] = "Commands" + + sub1 = argparse_subcommand() + sub1["name"] = "sync" + + sub2 = argparse_subcommand() + sub2["name"] = "add" + + container.append(sub1) + container.append(sub2) + + assert len(container.children) == 2 + + +def test_subcommand_can_contain_program() -> None: + """Test that subcommand can contain nested program (for recursion).""" + subcommand = argparse_subcommand() + subcommand["name"] = "sync" + + nested_program = argparse_program() + nested_program["prog"] = "myapp sync" + + subcommand.append(nested_program) + + assert len(subcommand.children) == 1 + assert isinstance(subcommand.children[0], argparse_program) + + +# --- Attribute handling tests --- + + +def test_argument_with_all_attributes() -> None: + """Test argument node with all possible attributes.""" + node = argparse_argument() + node["names"] = ["-f", "--file"] + node["help"] = "Input file" + 
node["metavar"] = "FILE" + node["required"] = True + node["default_string"] = "input.txt" + node["choices"] = ["a", "b", "c"] + node["type_name"] = "str" + node["mutex"] = False + node["mutex_required"] = False + + assert node["names"] == ["-f", "--file"] + assert node["help"] == "Input file" + assert node["metavar"] == "FILE" + assert node["required"] is True + assert node["default_string"] == "input.txt" + assert node["choices"] == ["a", "b", "c"] + assert node["type_name"] == "str" + + +def test_argument_with_mutex_marker() -> None: + """Test argument node marked as part of mutex group.""" + node = argparse_argument() + node["names"] = ["-v"] + node["mutex"] = True + node["mutex_required"] = True + + assert node["mutex"] is True + assert node["mutex_required"] is True + + +def test_node_get_with_default() -> None: + """Test getting attributes with defaults.""" + node = argparse_argument() + node["names"] = ["-v"] + + # Attribute that exists + assert node.get("names") == ["-v"] + + # Attribute that doesn't exist - get() returns None + assert node.get("nonexistent") is None + + # Attribute with explicit default + assert node.get("help", "default help") == "default help" + + +# --- Full tree construction test --- + + +def test_full_node_tree() -> None: + """Test constructing a complete node tree.""" + # Root program + program = argparse_program() + program["prog"] = "myapp" + + # Usage + usage = argparse_usage() + usage["usage"] = "myapp [-h] [-v] command" + program.append(usage) + + # Positional group + pos_group = argparse_group() + pos_group["title"] = "Positional Arguments" + + cmd_arg = argparse_argument() + cmd_arg["names"] = ["command"] + cmd_arg["help"] = "Command to run" + pos_group.append(cmd_arg) + program.append(pos_group) + + # Optional group + opt_group = argparse_group() + opt_group["title"] = "Optional Arguments" + + verbose = argparse_argument() + verbose["names"] = ["-v", "--verbose"] + verbose["help"] = "Verbose mode" + opt_group.append(verbose) 
+ program.append(opt_group) + + # Subcommands + subs = argparse_subcommands() + subs["title"] = "Commands" + + sync_sub = argparse_subcommand() + sync_sub["name"] = "sync" + sync_sub["help"] = "Sync repos" + subs.append(sync_sub) + + program.append(subs) + + # Verify tree structure + assert len(program.children) == 4 # usage, pos_group, opt_group, subs + assert isinstance(program.children[0], argparse_usage) + assert isinstance(program.children[1], argparse_group) + assert isinstance(program.children[2], argparse_group) + assert isinstance(program.children[3], argparse_subcommands) + + +# --- ID generation tests --- + + +def test_generate_argument_id_short_option() -> None: + """Test ID generation for short option.""" + assert _generate_argument_id(["-L"]) == "L" + + +def test_generate_argument_id_long_option() -> None: + """Test ID generation for long option.""" + assert _generate_argument_id(["--help"]) == "help" + + +def test_generate_argument_id_multiple_names() -> None: + """Test ID generation for argument with multiple names.""" + assert _generate_argument_id(["-v", "--verbose"]) == "v-verbose" + + +def test_generate_argument_id_with_prefix() -> None: + """Test ID generation with prefix for namespace isolation.""" + assert _generate_argument_id(["-L"], "shell") == "shell-L" + assert _generate_argument_id(["--help"], "load") == "load-help" + + +def test_generate_argument_id_positional() -> None: + """Test ID generation for positional argument.""" + assert _generate_argument_id(["filename"]) == "filename" + + +def test_generate_argument_id_empty() -> None: + """Test ID generation with empty names list.""" + assert _generate_argument_id([]) == "" + + +def test_generate_argument_id_prefix_no_names() -> None: + """Test that prefix alone doesn't create ID when no names.""" + assert _generate_argument_id([], "shell") == "" + + +# --- HTML rendering tests using NamedTuple for parametrization --- + + +class ArgumentHTMLCase(t.NamedTuple): + """Test case for argument 
HTML rendering.""" + + test_id: str + names: list[str] + metavar: str | None + help_text: str | None + default: str | None + id_prefix: str + expected_patterns: list[str] # Regex patterns to match + + +ARGUMENT_HTML_CASES = [ + ArgumentHTMLCase( + test_id="short_option_with_metavar", + names=["-L"], + metavar="socket-name", + help_text="pass-through for tmux -L", + default="None", + id_prefix="shell", + expected_patterns=[ + r'
', + r'
', + r'-L', + r'socket-name', + r'¶', + r'
', + r'
Default
', + r'
None
', + r"
", + ], + ), + ArgumentHTMLCase( + test_id="long_option", + names=["--help"], + metavar=None, + help_text="show help", + default=None, + id_prefix="", + expected_patterns=[ + r'--help', + r'id="help"', + r'href="#help"', + ], + ), + ArgumentHTMLCase( + test_id="positional_argument", + names=["filename"], + metavar=None, + help_text="input file", + default=None, + id_prefix="", + expected_patterns=[ + r'filename', + r'id="filename"', + ], + ), + ArgumentHTMLCase( + test_id="multiple_names", + names=["-v", "--verbose"], + metavar=None, + help_text="Enable verbose mode", + default=None, + id_prefix="load", + expected_patterns=[ + r'id="load-v-verbose"', + r'-v', + r'--verbose', + r'href="#load-v-verbose"', + ], + ), +] + + +class MockTranslator: + """Mock HTML5Translator for testing HTML generation.""" + + def __init__(self) -> None: + """Initialize mock translator.""" + self.body: list[str] = [] + + def encode(self, text: str) -> str: + """HTML encode text.""" + return str(text).replace("&", "&").replace("<", "<").replace(">", ">") + + +def render_argument_to_html( + names: list[str], + metavar: str | None, + help_text: str | None, + default: str | None, + id_prefix: str, +) -> str: + """Render an argument node to HTML string for testing. + + Parameters + ---------- + names + Argument names (e.g., ["-v", "--verbose"]). + metavar + Optional metavar (e.g., "FILE"). + help_text + Help text for the argument. + default + Default value string. + id_prefix + Prefix for the argument ID. + + Returns + ------- + str + HTML string from the mock translator's body. 
+ """ + from sphinx_argparse_neo.nodes import ( + depart_argparse_argument_html, + visit_argparse_argument_html, + ) + + node = argparse_argument() + node["names"] = names + node["metavar"] = metavar + node["help"] = help_text + node["default_string"] = default + node["id_prefix"] = id_prefix + + translator = MockTranslator() + visit_argparse_argument_html(translator, node) + depart_argparse_argument_html(translator, node) + + return "".join(translator.body) + + +@pytest.mark.parametrize( + "case", + ARGUMENT_HTML_CASES, + ids=lambda c: c.test_id, +) +def test_argument_html_rendering(case: ArgumentHTMLCase) -> None: + """Test HTML output for argument nodes.""" + html = render_argument_to_html( + names=case.names, + metavar=case.metavar, + help_text=case.help_text, + default=case.default, + id_prefix=case.id_prefix, + ) + + for pattern in case.expected_patterns: + assert re.search(pattern, html), f"Pattern not found: {pattern}\nHTML: {html}" + + +def test_argument_wrapper_has_id() -> None: + """Verify wrapper div has correct ID attribute.""" + html = render_argument_to_html( + names=["-f", "--file"], + metavar="PATH", + help_text="Input file", + default=None, + id_prefix="convert", + ) + + assert 'id="convert-f-file"' in html + assert '
None: + """Verify headerlink anchor exists with correct href.""" + html = render_argument_to_html( + names=["--output"], + metavar="FILE", + help_text="Output file", + default=None, + id_prefix="freeze", + ) + + assert '¶' in html + + +def test_default_value_styled() -> None: + """Verify default value is wrapped in nv span.""" + html = render_argument_to_html( + names=["--format"], + metavar=None, + help_text="Output format", + default="json", + id_prefix="", + ) + + assert '
Default
' in html + assert '
json
' in html + + +def test_wrapper_div_closed() -> None: + """Verify wrapper div is properly closed.""" + html = render_argument_to_html( + names=["-v"], + metavar=None, + help_text="Verbose", + default=None, + id_prefix="", + ) + + # Count opening and closing div tags + open_divs = html.count("") + assert open_divs == close_divs, f"Unbalanced divs in HTML: {html}" + + +def test_argument_no_id_prefix() -> None: + """Test argument rendering without ID prefix.""" + html = render_argument_to_html( + names=["--debug"], + metavar=None, + help_text="Enable debug mode", + default=None, + id_prefix="", + ) + + assert 'id="debug"' in html + assert 'href="#debug"' in html diff --git a/tests/docs/_ext/sphinx_argparse_neo/test_parser.py b/tests/docs/_ext/sphinx_argparse_neo/test_parser.py new file mode 100644 index 0000000..48f43d9 --- /dev/null +++ b/tests/docs/_ext/sphinx_argparse_neo/test_parser.py @@ -0,0 +1,524 @@ +"""Tests for sphinx_argparse_neo.parser module.""" + +from __future__ import annotations + +import argparse +import typing as t + +import pytest +from sphinx_argparse_neo.parser import ( + ArgumentInfo, + ParserInfo, + SubcommandInfo, + _extract_argument, + _format_default, + _get_action_name, + _get_type_name, + extract_parser, +) + +# --- _format_default tests --- + + +class FormatDefaultFixture(t.NamedTuple): + """Test fixture for _format_default function.""" + + test_id: str + default: t.Any + expected: str | None + + +FORMAT_DEFAULT_FIXTURES: list[FormatDefaultFixture] = [ + FormatDefaultFixture( + test_id="none_value", + default=None, + expected="None", + ), + FormatDefaultFixture( + test_id="string_value", + default="hello", + expected="hello", + ), + FormatDefaultFixture( + test_id="integer_value", + default=42, + expected="42", + ), + FormatDefaultFixture( + test_id="float_value", + default=3.14, + expected="3.14", + ), + FormatDefaultFixture( + test_id="list_value", + default=[1, 2, 3], + expected="[1, 2, 3]", + ), + FormatDefaultFixture( + 
test_id="suppress_value", + default=argparse.SUPPRESS, + expected=None, + ), + FormatDefaultFixture( + test_id="empty_string", + default="", + expected="", + ), + FormatDefaultFixture( + test_id="boolean_true", + default=True, + expected="True", + ), + FormatDefaultFixture( + test_id="boolean_false", + default=False, + expected="False", + ), +] + + +@pytest.mark.parametrize( + FormatDefaultFixture._fields, + FORMAT_DEFAULT_FIXTURES, + ids=[f.test_id for f in FORMAT_DEFAULT_FIXTURES], +) +def test_format_default(test_id: str, default: t.Any, expected: str | None) -> None: + """Test default value formatting.""" + assert _format_default(default) == expected + + +# --- _get_type_name tests --- + + +def test_get_type_name_int() -> None: + """Test type name extraction for int.""" + parser = argparse.ArgumentParser() + action = parser.add_argument("--count", type=int) + assert _get_type_name(action) == "int" + + +def test_get_type_name_float() -> None: + """Test type name extraction for float.""" + parser = argparse.ArgumentParser() + action = parser.add_argument("--ratio", type=float) + assert _get_type_name(action) == "float" + + +def test_get_type_name_str() -> None: + """Test type name extraction for str.""" + parser = argparse.ArgumentParser() + action = parser.add_argument("--name", type=str) + assert _get_type_name(action) == "str" + + +def test_get_type_name_none() -> None: + """Test type name extraction when no type specified.""" + parser = argparse.ArgumentParser() + action = parser.add_argument("--name") + assert _get_type_name(action) is None + + +def test_get_type_name_callable() -> None: + """Test type name extraction for callable types.""" + from pathlib import Path + + parser = argparse.ArgumentParser() + action = parser.add_argument("--path", type=Path) + assert _get_type_name(action) == "Path" + + +# --- _get_action_name tests --- + + +class ActionNameFixture(t.NamedTuple): + """Test fixture for _get_action_name function.""" + + test_id: str + 
action_kwargs: dict[str, t.Any] + expected: str + + +ACTION_NAME_FIXTURES: list[ActionNameFixture] = [ + ActionNameFixture( + test_id="store_default", + action_kwargs={"dest": "name"}, + expected="store", + ), + ActionNameFixture( + test_id="store_true", + action_kwargs={"action": "store_true", "dest": "flag"}, + expected="store_true", + ), + ActionNameFixture( + test_id="store_false", + action_kwargs={"action": "store_false", "dest": "flag"}, + expected="store_false", + ), + ActionNameFixture( + test_id="store_const", + action_kwargs={"action": "store_const", "const": "value", "dest": "const"}, + expected="store_const", + ), + ActionNameFixture( + test_id="append", + action_kwargs={"action": "append", "dest": "items"}, + expected="append", + ), + ActionNameFixture( + test_id="count", + action_kwargs={"action": "count", "dest": "verbose"}, + expected="count", + ), +] + + +@pytest.mark.parametrize( + ActionNameFixture._fields, + ACTION_NAME_FIXTURES, + ids=[f.test_id for f in ACTION_NAME_FIXTURES], +) +def test_get_action_name( + test_id: str, action_kwargs: dict[str, t.Any], expected: str +) -> None: + """Test action name extraction.""" + parser = argparse.ArgumentParser() + action = parser.add_argument("--test", **action_kwargs) + assert _get_action_name(action) == expected + + +# --- _extract_argument tests --- + + +def test_extract_argument_positional() -> None: + """Test extracting a positional argument.""" + parser = argparse.ArgumentParser() + action = parser.add_argument("filename", help="Input file") + info = _extract_argument(action) + + assert info.names == ["filename"] + assert info.help == "Input file" + assert info.is_positional is True + assert info.required is True + + +def test_extract_argument_optional() -> None: + """Test extracting an optional argument.""" + parser = argparse.ArgumentParser() + action = parser.add_argument("-v", "--verbose", action="store_true", help="Verbose") + info = _extract_argument(action) + + assert info.names == ["-v", 
"--verbose"] + assert info.action == "store_true" + assert info.is_positional is False + assert info.required is False + + +def test_extract_argument_with_choices() -> None: + """Test extracting argument with choices.""" + parser = argparse.ArgumentParser() + action = parser.add_argument("--format", choices=["json", "yaml", "xml"]) + info = _extract_argument(action) + + assert info.choices == ["json", "yaml", "xml"] + + +def test_extract_argument_with_metavar() -> None: + """Test extracting argument with metavar.""" + parser = argparse.ArgumentParser() + action = parser.add_argument("--output", metavar="FILE") + info = _extract_argument(action) + + assert info.metavar == "FILE" + + +def test_extract_argument_tuple_metavar() -> None: + """Test extracting argument with tuple metavar.""" + parser = argparse.ArgumentParser() + action = parser.add_argument("--range", nargs=2, metavar=("MIN", "MAX")) + info = _extract_argument(action) + + assert info.metavar == "MIN MAX" + + +def test_extract_argument_suppressed_help() -> None: + """Test extracting argument with suppressed help.""" + parser = argparse.ArgumentParser() + action = parser.add_argument("--secret", help=argparse.SUPPRESS) + info = _extract_argument(action) + + assert info.help is None + + +# --- extract_parser integration tests --- + + +def test_extract_parser_simple(simple_parser: argparse.ArgumentParser) -> None: + """Test extracting a simple parser.""" + info = extract_parser(simple_parser) + + assert info.prog == "myapp" + assert info.description == "A simple test application" + assert len(info.argument_groups) >= 1 + + # Find arguments + all_args = [arg for group in info.argument_groups for arg in group.arguments] + arg_names = [name for arg in all_args for name in arg.names] + + assert "filename" in arg_names + assert "--verbose" in arg_names or "-v" in arg_names + + +def test_extract_parser_with_groups( + parser_with_groups: argparse.ArgumentParser, +) -> None: + """Test extracting parser with custom 
groups.""" + info = extract_parser(parser_with_groups) + + group_titles = [g.title for g in info.argument_groups] + assert "Input Options" in group_titles + assert "Output Options" in group_titles + + +def test_extract_parser_with_subcommands( + parser_with_subcommands: argparse.ArgumentParser, +) -> None: + """Test extracting parser with subcommands.""" + info = extract_parser(parser_with_subcommands) + + assert info.subcommands is not None + assert len(info.subcommands) == 2 + + subcmd_names = [s.name for s in info.subcommands] + assert "sync" in subcmd_names + assert "add" in subcmd_names + + # Check alias + add_cmd = next(s for s in info.subcommands if s.name == "add") + assert "a" in add_cmd.aliases + + +def test_extract_parser_with_mutex(parser_with_mutex: argparse.ArgumentParser) -> None: + """Test extracting parser with mutually exclusive group.""" + info = extract_parser(parser_with_mutex) + + # Find the group with mutex + for group in info.argument_groups: + if group.mutually_exclusive: + mutex = group.mutually_exclusive[0] + assert mutex.required is True + assert len(mutex.arguments) == 2 + return + + pytest.fail("No mutually exclusive group found") + + +def test_extract_parser_with_all_actions( + parser_with_all_actions: argparse.ArgumentParser, +) -> None: + """Test extracting parser with all action types.""" + info = extract_parser(parser_with_all_actions) + + all_args = [arg for group in info.argument_groups for arg in group.arguments] + actions = {arg.dest: arg.action for arg in all_args} + + assert actions.get("name") == "store" + assert actions.get("enable") == "store_const" + assert actions.get("flag") == "store_true" + assert actions.get("item") == "append" + assert actions.get("verbose") == "count" + + +def test_extract_parser_with_types( + parser_with_types: argparse.ArgumentParser, +) -> None: + """Test extracting parser with typed arguments.""" + info = extract_parser(parser_with_types) + + all_args = [arg for group in info.argument_groups 
for arg in group.arguments] + types = {arg.dest: arg.type_name for arg in all_args} + + assert types.get("count") == "int" + assert types.get("ratio") == "float" + assert types.get("path") == "Path" + + +def test_extract_parser_with_nargs( + parser_with_nargs: argparse.ArgumentParser, +) -> None: + """Test extracting parser with nargs variants.""" + info = extract_parser(parser_with_nargs) + + all_args = [arg for group in info.argument_groups for arg in group.arguments] + nargs_map = {arg.dest: arg.nargs for arg in all_args} + + assert nargs_map.get("optional") == "?" + assert nargs_map.get("zero_or_more") == "*" + assert nargs_map.get("one_or_more") == "+" + assert nargs_map.get("exactly_two") == 2 + + +def test_extract_parser_with_defaults( + parser_with_defaults: argparse.ArgumentParser, +) -> None: + """Test extracting parser with various defaults.""" + info = extract_parser(parser_with_defaults) + + all_args = [arg for group in info.argument_groups for arg in group.arguments] + defaults = {arg.dest: arg.default_string for arg in all_args} + + assert defaults.get("none_default") == "None" + assert defaults.get("string_default") == "hello" + assert defaults.get("int_default") == "42" + # Suppressed default should have None default_string + assert "suppress" not in defaults or defaults.get("suppress") is None + + +def test_extract_parser_nested_subcommands( + nested_subcommands_parser: argparse.ArgumentParser, +) -> None: + """Test extracting parser with nested subcommands.""" + info = extract_parser(nested_subcommands_parser) + + assert info.subcommands is not None + assert len(info.subcommands) == 1 + + repo = info.subcommands[0] + assert repo.name == "repo" + assert repo.parser.subcommands is not None + assert len(repo.parser.subcommands) == 2 + + +def test_extract_parser_usage_generation() -> None: + """Test usage string generation.""" + parser = argparse.ArgumentParser(prog="test") + parser.add_argument("file") + parser.add_argument("-v", "--verbose", 
action="store_true") + + info = extract_parser(parser) + + assert "test" in info.bare_usage + assert "file" in info.bare_usage + + +def test_extract_parser_custom_usage() -> None: + """Test parser with custom usage string.""" + parser = argparse.ArgumentParser(prog="test", usage="test [options] file") + + info = extract_parser(parser) + + assert info.usage == "test [options] file" + + +def test_extract_parser_with_epilog() -> None: + """Test parser with epilog.""" + parser = argparse.ArgumentParser( + prog="test", + epilog="For more info, see docs.", + ) + + info = extract_parser(parser) + + assert info.epilog == "For more info, see docs." + + +# --- ArgumentInfo property tests --- + + +def test_argument_info_is_positional_true() -> None: + """Test is_positional for positional argument.""" + info = ArgumentInfo( + names=["filename"], + help=None, + default=None, + default_string=None, + choices=None, + required=True, + metavar=None, + nargs=None, + action="store", + type_name=None, + const=None, + dest="filename", + ) + assert info.is_positional is True + + +def test_argument_info_is_positional_false() -> None: + """Test is_positional for optional argument.""" + info = ArgumentInfo( + names=["-f", "--file"], + help=None, + default=None, + default_string=None, + choices=None, + required=False, + metavar=None, + nargs=None, + action="store", + type_name=None, + const=None, + dest="file", + ) + assert info.is_positional is False + + +def test_argument_info_empty_names() -> None: + """Test is_positional with empty names list.""" + info = ArgumentInfo( + names=[], + help=None, + default=None, + default_string=None, + choices=None, + required=False, + metavar=None, + nargs=None, + action="store", + type_name=None, + const=None, + dest="empty", + ) + assert info.is_positional is False + + +# --- Dataclass tests --- + + +def test_parser_info_dataclass() -> None: + """Test ParserInfo dataclass creation.""" + info = ParserInfo( + prog="test", + usage=None, + bare_usage="test 
[-h]", + description="Test description", + epilog="Test epilog", + argument_groups=[], + subcommands=None, + subcommand_dest=None, + ) + + assert info.prog == "test" + assert info.description == "Test description" + + +def test_subcommand_info_recursive() -> None: + """Test SubcommandInfo with nested parser.""" + nested_info = ParserInfo( + prog="nested", + usage=None, + bare_usage="nested [-h]", + description=None, + epilog=None, + argument_groups=[], + subcommands=None, + subcommand_dest=None, + ) + + sub = SubcommandInfo( + name="sub", + aliases=[], + help="Subcommand help", + parser=nested_info, + ) + + assert sub.parser.prog == "nested" diff --git a/tests/docs/_ext/sphinx_argparse_neo/test_renderer.py b/tests/docs/_ext/sphinx_argparse_neo/test_renderer.py new file mode 100644 index 0000000..0a0b0da --- /dev/null +++ b/tests/docs/_ext/sphinx_argparse_neo/test_renderer.py @@ -0,0 +1,498 @@ +"""Tests for sphinx_argparse_neo.renderer module.""" + +from __future__ import annotations + +import argparse +import typing as t + +from docutils import nodes +from sphinx_argparse_neo.nodes import ( + argparse_argument, + argparse_group, + argparse_program, + argparse_subcommand, + argparse_subcommands, + argparse_usage, +) +from sphinx_argparse_neo.parser import ( + ArgumentGroup, + ArgumentInfo, + MutuallyExclusiveGroup, + ParserInfo, + SubcommandInfo, + extract_parser, +) +from sphinx_argparse_neo.renderer import ( + ArgparseRenderer, + RenderConfig, + create_renderer, +) + +# --- RenderConfig tests --- + + +def test_render_config_defaults() -> None: + """Test RenderConfig default values.""" + config = RenderConfig() + + assert config.group_title_prefix == "" + assert config.show_defaults is True + assert config.show_choices is True + assert config.show_types is True + + +def test_render_config_custom_values() -> None: + """Test RenderConfig with custom values.""" + config = RenderConfig( + group_title_prefix="CLI ", + show_defaults=False, + show_choices=False, + 
show_types=False, + ) + + assert config.group_title_prefix == "CLI " + assert config.show_defaults is False + assert config.show_choices is False + assert config.show_types is False + + +# --- ArgparseRenderer basic tests --- + + +def test_renderer_creation_default_config() -> None: + """Test creating renderer with default config.""" + renderer = ArgparseRenderer() + + assert renderer.config is not None + assert renderer.config.show_defaults is True + + +def test_renderer_creation_custom_config() -> None: + """Test creating renderer with custom config.""" + config = RenderConfig(group_title_prefix="CLI ") + renderer = ArgparseRenderer(config=config) + + assert renderer.config.group_title_prefix == "CLI " + + +def test_create_renderer_factory() -> None: + """Test create_renderer factory function.""" + renderer = create_renderer() + assert isinstance(renderer, ArgparseRenderer) + + +def test_create_renderer_with_config() -> None: + """Test create_renderer with custom config.""" + config = RenderConfig(show_types=False) + renderer = create_renderer(config=config) + + assert renderer.config.show_types is False + + +# --- Render method tests --- + + +def test_render_simple_parser(simple_parser: argparse.ArgumentParser) -> None: + """Test rendering a simple parser produces sibling nodes for TOC. 
+ + The renderer now outputs sections as siblings of argparse_program: + - argparse_program (description only) + - section#usage + - section#positional-arguments + - section#options + """ + parser_info = extract_parser(simple_parser) + renderer = ArgparseRenderer() + rendered_nodes = renderer.render(parser_info) + + # Should have multiple nodes: program + usage section + group sections + assert len(rendered_nodes) >= 3 + + # First node is argparse_program + assert isinstance(rendered_nodes[0], argparse_program) + assert rendered_nodes[0]["prog"] == "myapp" + + # Second node should be usage section + assert isinstance(rendered_nodes[1], nodes.section) + assert "usage" in rendered_nodes[1]["ids"] + + +def test_render_includes_usage(simple_parser: argparse.ArgumentParser) -> None: + """Test that render includes usage as a sibling section.""" + parser_info = extract_parser(simple_parser) + renderer = ArgparseRenderer() + rendered_nodes = renderer.render(parser_info) + + # Find the usage section (sibling of program, not child) + usage_sections = [ + n + for n in rendered_nodes + if isinstance(n, nodes.section) and "usage" in n.get("ids", []) + ] + + assert len(usage_sections) == 1 + + # Usage section should contain argparse_usage node + usage_section = usage_sections[0] + usage_node = [c for c in usage_section.children if isinstance(c, argparse_usage)] + assert len(usage_node) == 1 + assert "myapp" in usage_node[0]["usage"] + + +def test_render_includes_groups(simple_parser: argparse.ArgumentParser) -> None: + """Test that render includes argument groups as sibling sections.""" + parser_info = extract_parser(simple_parser) + renderer = ArgparseRenderer() + rendered_nodes = renderer.render(parser_info) + + # Groups are now wrapped in sections and are siblings of program + # Find sections that contain argparse_group nodes + group_sections = [ + n + for n in rendered_nodes + if isinstance(n, nodes.section) + and any(isinstance(c, argparse_group) for c in n.children) + ] + 
+ assert len(group_sections) >= 1 + + +def test_render_groups_contain_arguments( + simple_parser: argparse.ArgumentParser, +) -> None: + """Test that rendered groups contain argument nodes.""" + parser_info = extract_parser(simple_parser) + renderer = ArgparseRenderer() + rendered_nodes = renderer.render(parser_info) + + # Find sections that contain argparse_group nodes + group_sections = [ + n + for n in rendered_nodes + if isinstance(n, nodes.section) + and any(isinstance(c, argparse_group) for c in n.children) + ] + + # Collect all arguments from groups inside sections + all_args: list[argparse_argument] = [] + for section in group_sections: + for child in section.children: + if isinstance(child, argparse_group): + all_args.extend( + arg for arg in child.children if isinstance(arg, argparse_argument) + ) + + assert len(all_args) >= 1 + + +def test_render_with_subcommands( + parser_with_subcommands: argparse.ArgumentParser, +) -> None: + """Test rendering parser with subcommands.""" + parser_info = extract_parser(parser_with_subcommands) + renderer = ArgparseRenderer() + rendered_nodes = renderer.render(parser_info) + + # Subcommands node is a sibling of program + subcommands_nodes = [ + n for n in rendered_nodes if isinstance(n, argparse_subcommands) + ] + + assert len(subcommands_nodes) == 1 + + # Check subcommand children + subs_container = subcommands_nodes[0] + subcmd_nodes = [ + c for c in subs_container.children if isinstance(c, argparse_subcommand) + ] + assert len(subcmd_nodes) == 2 + + +# --- Config option effect tests --- + + +def _collect_args_from_rendered_nodes( + rendered_nodes: list[nodes.Node], +) -> list[argparse_argument]: + """Collect all argparse_argument nodes from rendered output.""" + all_args: list[argparse_argument] = [] + for node in rendered_nodes: + if isinstance(node, nodes.section): + for child in node.children: + if isinstance(child, argparse_group): + all_args.extend( + arg + for arg in child.children + if isinstance(arg, 
argparse_argument) + ) + return all_args + + +def test_render_group_title_prefix() -> None: + """Test that group_title_prefix is applied to section titles.""" + parser = argparse.ArgumentParser(prog="test") + parser.add_argument("--opt") + parser_info = extract_parser(parser) + + config = RenderConfig(group_title_prefix="CLI: ") + renderer = ArgparseRenderer(config=config) + rendered_nodes = renderer.render(parser_info) + + # Find sections that contain argparse_group + group_sections = [ + n + for n in rendered_nodes + if isinstance(n, nodes.section) + and any(isinstance(c, argparse_group) for c in n.children) + ] + + # Section IDs should include the prefix (normalized) + ids = [section["ids"][0] for section in group_sections if section["ids"]] + assert any("cli:" in id_str.lower() for id_str in ids) + + +def test_render_show_defaults_false() -> None: + """Test that show_defaults=False hides defaults.""" + parser = argparse.ArgumentParser(prog="test") + parser.add_argument("--opt", default="value") + parser_info = extract_parser(parser) + + config = RenderConfig(show_defaults=False) + renderer = ArgparseRenderer(config=config) + rendered_nodes = renderer.render(parser_info) + + all_args = _collect_args_from_rendered_nodes(rendered_nodes) + + # Default string should not be set + for arg in all_args: + assert arg.get("default_string") is None + + +def test_render_show_choices_false() -> None: + """Test that show_choices=False hides choices.""" + parser = argparse.ArgumentParser(prog="test") + parser.add_argument("--format", choices=["json", "yaml"]) + parser_info = extract_parser(parser) + + config = RenderConfig(show_choices=False) + renderer = ArgparseRenderer(config=config) + rendered_nodes = renderer.render(parser_info) + + all_args = _collect_args_from_rendered_nodes(rendered_nodes) + + # Choices should not be set + for arg in all_args: + assert arg.get("choices") is None + + +def test_render_show_types_false() -> None: + """Test that show_types=False hides type 
info.""" + parser = argparse.ArgumentParser(prog="test") + parser.add_argument("--count", type=int) + parser_info = extract_parser(parser) + + config = RenderConfig(show_types=False) + renderer = ArgparseRenderer(config=config) + rendered_nodes = renderer.render(parser_info) + + all_args = _collect_args_from_rendered_nodes(rendered_nodes) + + # Type name should not be set + for arg in all_args: + assert arg.get("type_name") is None + + +# --- Individual render method tests --- + + +def test_render_usage_method() -> None: + """Test render_usage method directly.""" + parser_info = ParserInfo( + prog="test", + usage=None, + bare_usage="test [-h] [-v]", + description=None, + epilog=None, + argument_groups=[], + subcommands=None, + subcommand_dest=None, + ) + + renderer = ArgparseRenderer() + usage_node = renderer.render_usage(parser_info) + + assert isinstance(usage_node, argparse_usage) + assert usage_node["usage"] == "test [-h] [-v]" + + +def test_render_argument_method() -> None: + """Test render_argument method directly.""" + arg_info = ArgumentInfo( + names=["-v", "--verbose"], + help="Enable verbose mode", + default=False, + default_string="False", + choices=None, + required=False, + metavar=None, + nargs=None, + action="store_true", + type_name=None, + const=True, + dest="verbose", + ) + + renderer = ArgparseRenderer() + arg_node = renderer.render_argument(arg_info) + + assert isinstance(arg_node, argparse_argument) + assert arg_node["names"] == ["-v", "--verbose"] + assert arg_node["help"] == "Enable verbose mode" + + +def test_render_group_method() -> None: + """Test render_group method directly.""" + group_info = ArgumentGroup( + title="Options", + description="Available options", + arguments=[ + ArgumentInfo( + names=["-v"], + help="Verbose", + default=False, + default_string="False", + choices=None, + required=False, + metavar=None, + nargs=None, + action="store_true", + type_name=None, + const=True, + dest="verbose", + ), + ], + mutually_exclusive=[], + ) 
+ + renderer = ArgparseRenderer() + group_node = renderer.render_group(group_info) + + assert isinstance(group_node, argparse_group) + assert group_node["title"] == "Options" + assert group_node["description"] == "Available options" + assert len(group_node.children) == 1 + + +def test_render_mutex_group_method() -> None: + """Test render_mutex_group method.""" + mutex = MutuallyExclusiveGroup( + arguments=[ + ArgumentInfo( + names=["-v"], + help="Verbose", + default=False, + default_string="False", + choices=None, + required=False, + metavar=None, + nargs=None, + action="store_true", + type_name=None, + const=True, + dest="verbose", + ), + ArgumentInfo( + names=["-q"], + help="Quiet", + default=False, + default_string="False", + choices=None, + required=False, + metavar=None, + nargs=None, + action="store_true", + type_name=None, + const=True, + dest="quiet", + ), + ], + required=True, + ) + + renderer = ArgparseRenderer() + nodes = renderer.render_mutex_group(mutex) + + assert len(nodes) == 2 + assert all(isinstance(n, argparse_argument) for n in nodes) + assert all(n.get("mutex") is True for n in nodes) + assert all(n.get("mutex_required") is True for n in nodes) + + +def test_render_subcommand_method() -> None: + """Test render_subcommand method.""" + nested_parser = ParserInfo( + prog="myapp sub", + usage=None, + bare_usage="myapp sub [-h]", + description="Subcommand description", + epilog=None, + argument_groups=[], + subcommands=None, + subcommand_dest=None, + ) + + subcmd_info = SubcommandInfo( + name="sub", + aliases=["s"], + help="Subcommand help", + parser=nested_parser, + ) + + renderer = ArgparseRenderer() + subcmd_node = renderer.render_subcommand(subcmd_info) + + assert isinstance(subcmd_node, argparse_subcommand) + assert subcmd_node["name"] == "sub" + assert subcmd_node["aliases"] == ["s"] + assert subcmd_node["help"] == "Subcommand help" + + # Should have nested program + nested = [c for c in subcmd_node.children if isinstance(c, argparse_program)] 
+ assert len(nested) == 1 + + +# --- Post-process hook test --- + + +def test_post_process_default() -> None: + """Test that default post_process returns nodes unchanged.""" + renderer = ArgparseRenderer() + + from docutils import nodes as dn + + input_nodes = [dn.paragraph(text="test")] + + result = renderer.post_process(input_nodes) + + assert result == input_nodes + + +def test_post_process_custom() -> None: + """Test custom post_process implementation.""" + + class CustomRenderer(ArgparseRenderer): # type: ignore[misc] + def post_process(self, result_nodes: list[t.Any]) -> list[t.Any]: + # Add a marker to each node + for node in result_nodes: + node["custom_marker"] = True + return result_nodes + + renderer = CustomRenderer() + + from docutils import nodes as dn + + input_nodes = [dn.paragraph(text="test")] + + result = renderer.post_process(input_nodes) + + assert result[0].get("custom_marker") is True diff --git a/tests/docs/_ext/sphinx_argparse_neo/test_utils.py b/tests/docs/_ext/sphinx_argparse_neo/test_utils.py new file mode 100644 index 0000000..6d7049a --- /dev/null +++ b/tests/docs/_ext/sphinx_argparse_neo/test_utils.py @@ -0,0 +1,162 @@ +"""Tests for sphinx_argparse_neo text processing utilities.""" + +from __future__ import annotations + +import typing as t + +import pytest +from sphinx_argparse_neo.utils import escape_rst_emphasis, strip_ansi + +# --- strip_ansi tests --- + + +class StripAnsiFixture(t.NamedTuple): + """Test fixture for strip_ansi function.""" + + test_id: str + input_text: str + expected: str + + +STRIP_ANSI_FIXTURES: list[StripAnsiFixture] = [ + StripAnsiFixture( + test_id="plain_text", + input_text="hello", + expected="hello", + ), + StripAnsiFixture( + test_id="green_color", + input_text="\033[32mgreen\033[0m", + expected="green", + ), + StripAnsiFixture( + test_id="bold_blue", + input_text="\033[1;34mbold\033[0m", + expected="bold", + ), + StripAnsiFixture( + test_id="multiple_codes", + input_text="\033[1m\033[32mtest\033[0m", + 
expected="test", + ), + StripAnsiFixture( + test_id="empty_string", + input_text="", + expected="", + ), + StripAnsiFixture( + test_id="mixed_content", + input_text="pre\033[31mred\033[0mpost", + expected="preredpost", + ), + StripAnsiFixture( + test_id="reset_only", + input_text="\033[0m", + expected="", + ), + StripAnsiFixture( + test_id="sgr_params", + input_text="\033[38;5;196mred256\033[0m", + expected="red256", + ), +] + + +@pytest.mark.parametrize( + StripAnsiFixture._fields, + STRIP_ANSI_FIXTURES, + ids=[f.test_id for f in STRIP_ANSI_FIXTURES], +) +def test_strip_ansi(test_id: str, input_text: str, expected: str) -> None: + """Test ANSI escape code stripping.""" + assert strip_ansi(input_text) == expected + + +# --- escape_rst_emphasis tests --- + + +class EscapeRstEmphasisFixture(t.NamedTuple): + """Test fixture for escape_rst_emphasis function.""" + + test_id: str + input_text: str + expected: str + + +ESCAPE_RST_EMPHASIS_FIXTURES: list[EscapeRstEmphasisFixture] = [ + EscapeRstEmphasisFixture( + test_id="glob_pattern_quoted", + input_text='myapp load "my-*"', + expected='myapp load "my-\\*"', + ), + EscapeRstEmphasisFixture( + test_id="glob_pattern_django", + input_text="django-*", + expected="django-\\*", + ), + EscapeRstEmphasisFixture( + test_id="glob_pattern_flask", + input_text="flask-*", + expected="flask-\\*", + ), + EscapeRstEmphasisFixture( + test_id="multiple_patterns", + input_text="match django-* or flask-* packages", + expected="match django-\\* or flask-\\* packages", + ), + EscapeRstEmphasisFixture( + test_id="plain_text", + input_text="plain text without patterns", + expected="plain text without patterns", + ), + EscapeRstEmphasisFixture( + test_id="rst_emphasis_unchanged", + input_text="*emphasis* is ok", + expected="*emphasis* is ok", + ), + EscapeRstEmphasisFixture( + test_id="already_escaped", + input_text="django-\\*", + expected="django-\\*", + ), + EscapeRstEmphasisFixture( + test_id="empty_string", + input_text="", + expected="", + 
), + EscapeRstEmphasisFixture( + test_id="pattern_at_end", + input_text="ending with pattern-*", + expected="ending with pattern-\\*", + ), + EscapeRstEmphasisFixture( + test_id="hyphen_without_asterisk", + input_text="word-with-hyphens", + expected="word-with-hyphens", + ), + EscapeRstEmphasisFixture( + test_id="asterisk_without_hyphen", + input_text="asterisk * alone", + expected="asterisk * alone", + ), + EscapeRstEmphasisFixture( + test_id="double_asterisk", + input_text="glob-** pattern", + expected="glob-** pattern", + ), + EscapeRstEmphasisFixture( + test_id="space_after_asterisk", + input_text="word-* followed by space", + expected="word-\\* followed by space", + ), +] + + +@pytest.mark.parametrize( + EscapeRstEmphasisFixture._fields, + ESCAPE_RST_EMPHASIS_FIXTURES, + ids=[f.test_id for f in ESCAPE_RST_EMPHASIS_FIXTURES], +) +def test_escape_rst_emphasis(test_id: str, input_text: str, expected: str) -> None: + """Test RST emphasis escaping for glob patterns.""" + assert escape_rst_emphasis(input_text) == expected diff --git a/tests/docs/_ext/test_argparse_lexer.py b/tests/docs/_ext/test_argparse_lexer.py new file mode 100644 index 0000000..3635a0c --- /dev/null +++ b/tests/docs/_ext/test_argparse_lexer.py @@ -0,0 +1,825 @@ +"""Tests for argparse_lexer Pygments extension.""" + +from __future__ import annotations + +import typing as t + +import pytest +from argparse_lexer import ( + ArgparseHelpLexer, + ArgparseLexer, + ArgparseUsageLexer, + tokenize_argparse, + tokenize_usage, +) + +# --- Helper to extract token type names --- + + +def get_tokens(text: str, lexer_class: type = ArgparseLexer) -> list[tuple[str, str]]: + """Get tokens as (type_name, value) tuples. 
+ + Examples + -------- + >>> tokens = get_tokens("usage: cmd [-h]") + >>> any(t[0] == "Token.Name.Attribute" for t in tokens) + True + """ + lexer = lexer_class() + return [ + (str(tok_type), tok_value) for tok_type, tok_value in lexer.get_tokens(text) + ] + + +def get_usage_tokens(text: str) -> list[tuple[str, str]]: + """Get tokens using ArgparseUsageLexer. + + Examples + -------- + >>> tokens = get_usage_tokens("usage: cmd") + >>> tokens[0] + ('Token.Generic.Heading', 'usage:') + """ + return get_tokens(text, ArgparseUsageLexer) + + +def get_help_tokens(text: str) -> list[tuple[str, str]]: + """Get tokens using ArgparseHelpLexer. + + Examples + -------- + >>> tokens = get_help_tokens("positional arguments:") + >>> any("Subheading" in t[0] for t in tokens) + True + """ + return get_tokens(text, ArgparseHelpLexer) + + +# --- Token type fixtures --- + + +class TokenTypeFixture(t.NamedTuple): + """Test fixture for verifying specific token types.""" + + test_id: str + input_text: str + expected_token_type: str + expected_value: str + + +TOKEN_TYPE_FIXTURES: list[TokenTypeFixture] = [ + TokenTypeFixture( + test_id="usage_heading", + input_text="usage:", + expected_token_type="Token.Generic.Heading", + expected_value="usage:", + ), + TokenTypeFixture( + test_id="short_option", + input_text="-h", + expected_token_type="Token.Name.Attribute", + expected_value="-h", + ), + TokenTypeFixture( + test_id="short_option_v", + input_text="-v", + expected_token_type="Token.Name.Attribute", + expected_value="-v", + ), + TokenTypeFixture( + test_id="long_option", + input_text="--verbose", + expected_token_type="Token.Name.Tag", + expected_value="--verbose", + ), + TokenTypeFixture( + test_id="long_option_with_dashes", + input_text="--no-color", + expected_token_type="Token.Name.Tag", + expected_value="--no-color", + ), + TokenTypeFixture( + test_id="uppercase_metavar", + input_text="FILE", + expected_token_type="Token.Name.Variable", + expected_value="FILE", + ), + 
TokenTypeFixture( + test_id="uppercase_metavar_path", + input_text="PATH", + expected_token_type="Token.Name.Variable", + expected_value="PATH", + ), + TokenTypeFixture( + test_id="uppercase_metavar_with_underscore", + input_text="FILE_PATH", + expected_token_type="Token.Name.Variable", + expected_value="FILE_PATH", + ), + TokenTypeFixture( + test_id="command_name", + input_text="sync", + expected_token_type="Token.Name.Label", + expected_value="sync", + ), + TokenTypeFixture( + test_id="command_with_dash", + input_text="repo-name", + expected_token_type="Token.Name.Label", + expected_value="repo-name", + ), + TokenTypeFixture( + test_id="open_bracket", + input_text="[", + expected_token_type="Token.Punctuation", + expected_value="[", + ), + TokenTypeFixture( + test_id="close_bracket", + input_text="]", + expected_token_type="Token.Punctuation", + expected_value="]", + ), + TokenTypeFixture( + test_id="open_paren", + input_text="(", + expected_token_type="Token.Punctuation", + expected_value="(", + ), + TokenTypeFixture( + test_id="close_paren", + input_text=")", + expected_token_type="Token.Punctuation", + expected_value=")", + ), + TokenTypeFixture( + test_id="open_brace", + input_text="{", + expected_token_type="Token.Punctuation", + expected_value="{", + ), + TokenTypeFixture( + test_id="pipe_operator", + input_text="|", + expected_token_type="Token.Operator", + expected_value="|", + ), + TokenTypeFixture( + test_id="ellipsis", + input_text="...", + expected_token_type="Token.Punctuation", + expected_value="...", + ), +] + + +@pytest.mark.parametrize( + list(TokenTypeFixture._fields), + TOKEN_TYPE_FIXTURES, + ids=[f.test_id for f in TOKEN_TYPE_FIXTURES], +) +def test_token_type( + test_id: str, + input_text: str, + expected_token_type: str, + expected_value: str, +) -> None: + """Test individual token type detection.""" + tokens = get_usage_tokens(input_text) + # Find the expected token (skip whitespace) + non_ws_tokens = [(t, v) for t, v in tokens if 
"Whitespace" not in t and v.strip()] + assert len(non_ws_tokens) >= 1, f"No non-whitespace tokens found for '{input_text}'" + token_type, token_value = non_ws_tokens[0] + assert token_type == expected_token_type, ( + f"Expected {expected_token_type}, got {token_type}" + ) + assert token_value == expected_value + + +# --- Choice fixtures --- + + +class ChoiceFixture(t.NamedTuple): + """Test fixture for choice enumeration patterns.""" + + test_id: str + input_text: str + expected_choices: list[str] + + +CHOICE_FIXTURES: list[ChoiceFixture] = [ + ChoiceFixture( + test_id="simple_choices", + input_text="{json,yaml,table}", + expected_choices=["json", "yaml", "table"], + ), + ChoiceFixture( + test_id="numeric_choices", + input_text="{1,2,3}", + expected_choices=["1", "2", "3"], + ), + ChoiceFixture( + test_id="auto_always_never", + input_text="{auto,always,never}", + expected_choices=["auto", "always", "never"], + ), + ChoiceFixture( + test_id="two_choices", + input_text="{a,b}", + expected_choices=["a", "b"], + ), +] + + +@pytest.mark.parametrize( + list(ChoiceFixture._fields), + CHOICE_FIXTURES, + ids=[f.test_id for f in CHOICE_FIXTURES], +) +def test_choices( + test_id: str, + input_text: str, + expected_choices: list[str], +) -> None: + """Test choice enumeration tokenization.""" + tokens = get_usage_tokens(input_text) + # Extract choice values (Name.Constant tokens) + choice_tokens = [v for t, v in tokens if t == "Token.Name.Constant"] + assert choice_tokens == expected_choices + + +# --- Mutex group fixtures --- + + +class MutexGroupFixture(t.NamedTuple): + """Test fixture for mutually exclusive group patterns.""" + + test_id: str + input_text: str + expected_options: list[str] + is_required: bool + + +MUTEX_GROUP_FIXTURES: list[MutexGroupFixture] = [ + MutexGroupFixture( + test_id="optional_short", + input_text="[-a | -b | -c]", + expected_options=["-a", "-b", "-c"], + is_required=False, + ), + MutexGroupFixture( + test_id="optional_long", + input_text="[--foo 
FOO | --bar BAR]", + expected_options=["--foo", "--bar"], + is_required=False, + ), + MutexGroupFixture( + test_id="required_long", + input_text="(--foo | --bar)", + expected_options=["--foo", "--bar"], + is_required=True, + ), + MutexGroupFixture( + test_id="required_with_metavar", + input_text="(--input FILE | --stdin)", + expected_options=["--input", "--stdin"], + is_required=True, + ), + MutexGroupFixture( + test_id="optional_output_formats", + input_text="[--json | --ndjson | --table]", + expected_options=["--json", "--ndjson", "--table"], + is_required=False, + ), +] + + +@pytest.mark.parametrize( + list(MutexGroupFixture._fields), + MUTEX_GROUP_FIXTURES, + ids=[f.test_id for f in MUTEX_GROUP_FIXTURES], +) +def test_mutex_groups( + test_id: str, + input_text: str, + expected_options: list[str], + is_required: bool, +) -> None: + """Test mutually exclusive group tokenization.""" + tokens = get_usage_tokens(input_text) + + # Check for proper brackets (required uses parens, optional uses brackets) + if is_required: + assert ("Token.Punctuation", "(") in tokens + assert ("Token.Punctuation", ")") in tokens + else: + assert ("Token.Punctuation", "[") in tokens + assert ("Token.Punctuation", "]") in tokens + + # Check pipe operators present + pipe_count = sum(1 for t, v in tokens if t == "Token.Operator" and v == "|") + assert pipe_count == len(expected_options) - 1 + + # Check options are present + for opt in expected_options: + if opt.startswith("--"): + assert ("Token.Name.Tag", opt) in tokens + else: + assert ("Token.Name.Attribute", opt) in tokens + + +# --- Nargs pattern fixtures --- + + +class NargsFixture(t.NamedTuple): + """Test fixture for nargs/variadic patterns.""" + + test_id: str + input_text: str + has_ellipsis: bool + has_metavar: str | None + + +NARGS_FIXTURES: list[NargsFixture] = [ + NargsFixture( + test_id="nargs_plus", + input_text="FILE ...", + has_ellipsis=True, + has_metavar="FILE", + ), + NargsFixture( + test_id="nargs_star", + 
input_text="[FILE ...]", + has_ellipsis=True, + has_metavar="FILE", + ), + NargsFixture( + test_id="nargs_question", + input_text="[--foo [FOO]]", + has_ellipsis=False, + has_metavar="FOO", + ), + NargsFixture( + test_id="nargs_plus_with_option", + input_text="[--bar X [X ...]]", + has_ellipsis=True, + has_metavar="X", + ), +] + + +@pytest.mark.parametrize( + list(NargsFixture._fields), + NARGS_FIXTURES, + ids=[f.test_id for f in NARGS_FIXTURES], +) +def test_nargs_patterns( + test_id: str, + input_text: str, + has_ellipsis: bool, + has_metavar: str | None, +) -> None: + """Test nargs/variadic pattern tokenization.""" + tokens = get_usage_tokens(input_text) + + # Check ellipsis + ellipsis_present = ("Token.Punctuation", "...") in tokens + assert ellipsis_present == has_ellipsis + + # Check metavar + if has_metavar: + assert ("Token.Name.Variable", has_metavar) in tokens + + +# --- Long option with value fixtures --- + + +class LongOptionValueFixture(t.NamedTuple): + """Test fixture for long options with = values.""" + + test_id: str + input_text: str + option: str + value: str + + +LONG_OPTION_VALUE_FIXTURES: list[LongOptionValueFixture] = [ + LongOptionValueFixture( + test_id="config_file", + input_text="--config=FILE", + option="--config", + value="FILE", + ), + LongOptionValueFixture( + test_id="log_level", + input_text="--log-level=DEBUG", + option="--log-level", + value="DEBUG", + ), + LongOptionValueFixture( + test_id="lowercase_value", + input_text="--output=path", + option="--output", + value="path", + ), +] + + +@pytest.mark.parametrize( + list(LongOptionValueFixture._fields), + LONG_OPTION_VALUE_FIXTURES, + ids=[f.test_id for f in LONG_OPTION_VALUE_FIXTURES], +) +def test_long_option_with_equals_value( + test_id: str, + input_text: str, + option: str, + value: str, +) -> None: + """Test long option with = value tokenization.""" + tokens = get_usage_tokens(input_text) + non_ws_tokens = [(t, v) for t, v in tokens if "Whitespace" not in t] + + assert 
len(non_ws_tokens) >= 3 + assert non_ws_tokens[0] == ("Token.Name.Tag", option) + assert non_ws_tokens[1] == ("Token.Operator", "=") + assert non_ws_tokens[2][1] == value + + +# --- Short option with value fixtures --- + + +class ShortOptionValueFixture(t.NamedTuple): + """Test fixture for short options with space-separated values.""" + + test_id: str + input_text: str + option: str + value: str + + +SHORT_OPTION_VALUE_FIXTURES: list[ShortOptionValueFixture] = [ + ShortOptionValueFixture( + test_id="config_path", + input_text="-c config-path", + option="-c", + value="config-path", + ), + ShortOptionValueFixture( + test_id="directory", + input_text="-d DIRECTORY", + option="-d", + value="DIRECTORY", + ), + ShortOptionValueFixture( + test_id="simple_name", + input_text="-r name", + option="-r", + value="name", + ), + ShortOptionValueFixture( + test_id="underscore_metavar", + input_text="-L socket_name", + option="-L", + value="socket_name", + ), + ShortOptionValueFixture( + test_id="multiple_underscores", + input_text="-f tmux_config_file", + option="-f", + value="tmux_config_file", + ), +] + + +@pytest.mark.parametrize( + list(ShortOptionValueFixture._fields), + SHORT_OPTION_VALUE_FIXTURES, + ids=[f.test_id for f in SHORT_OPTION_VALUE_FIXTURES], +) +def test_short_option_with_value( + test_id: str, + input_text: str, + option: str, + value: str, +) -> None: + """Test short option followed by value tokenization.""" + tokens = get_usage_tokens(input_text) + non_ws_tokens = [(t, v) for t, v in tokens if "Whitespace" not in t] + + assert len(non_ws_tokens) >= 2 + assert non_ws_tokens[0] == ("Token.Name.Attribute", option) + assert non_ws_tokens[1][1] == value + + +# --- Full usage string fixtures --- + + +class UsageStringFixture(t.NamedTuple): + """Test fixture for full usage string tokenization.""" + + test_id: str + input_text: str + expected_contains: list[tuple[str, str]] + + +USAGE_STRING_FIXTURES: list[UsageStringFixture] = [ + UsageStringFixture( + 
test_id="simple_usage", + input_text="usage: cmd [-h]", + expected_contains=[ + ("Token.Generic.Heading", "usage:"), + ("Token.Name.Label", "cmd"), + ("Token.Punctuation", "["), + ("Token.Name.Attribute", "-h"), + ("Token.Punctuation", "]"), + ], + ), + UsageStringFixture( + test_id="mutually_exclusive", + input_text="[--json | --ndjson | --table]", + expected_contains=[ + ("Token.Name.Tag", "--json"), + ("Token.Operator", "|"), + ("Token.Name.Tag", "--ndjson"), + ("Token.Operator", "|"), + ("Token.Name.Tag", "--table"), + ], + ), + UsageStringFixture( + test_id="subcommand", + input_text="usage: vcspull sync", + expected_contains=[ + ("Token.Generic.Heading", "usage:"), + ("Token.Name.Label", "vcspull"), + # Subcommands now use Name.Function per 30ea233 + ("Token.Name.Function", "sync"), + ], + ), + UsageStringFixture( + test_id="with_choices", + input_text="usage: cmd {a,b,c}", + expected_contains=[ + ("Token.Generic.Heading", "usage:"), + ("Token.Name.Constant", "a"), + ("Token.Name.Constant", "b"), + ("Token.Name.Constant", "c"), + ], + ), + UsageStringFixture( + test_id="complex_usage", + input_text="usage: prog [-h] [--verbose] FILE ...", + expected_contains=[ + ("Token.Generic.Heading", "usage:"), + ("Token.Name.Label", "prog"), + ("Token.Name.Attribute", "-h"), + ("Token.Name.Tag", "--verbose"), + ("Token.Name.Variable", "FILE"), + ("Token.Punctuation", "..."), + ], + ), +] + + +@pytest.mark.parametrize( + list(UsageStringFixture._fields), + USAGE_STRING_FIXTURES, + ids=[f.test_id for f in USAGE_STRING_FIXTURES], +) +def test_usage_string( + test_id: str, + input_text: str, + expected_contains: list[tuple[str, str]], +) -> None: + """Test full usage string tokenization contains expected tokens.""" + tokens = get_usage_tokens(input_text) + for expected_type, expected_value in expected_contains: + assert (expected_type, expected_value) in tokens, ( + f"Expected ({expected_type}, {expected_value!r}) not found in tokens" + ) + + +# --- Section header fixtures 
--- + + +class SectionHeaderFixture(t.NamedTuple): + """Test fixture for section header recognition.""" + + test_id: str + input_text: str + expected_header: str + + +SECTION_HEADER_FIXTURES: list[SectionHeaderFixture] = [ + SectionHeaderFixture( + test_id="positional_arguments", + input_text="positional arguments:", + expected_header="positional arguments:", + ), + SectionHeaderFixture( + test_id="options", + input_text="options:", + expected_header="options:", + ), + SectionHeaderFixture( + test_id="optional_arguments", + input_text="optional arguments:", + expected_header="optional arguments:", + ), + SectionHeaderFixture( + test_id="custom_section", + input_text="advanced options:", + expected_header="advanced options:", + ), +] + + +@pytest.mark.parametrize( + list(SectionHeaderFixture._fields), + SECTION_HEADER_FIXTURES, + ids=[f.test_id for f in SECTION_HEADER_FIXTURES], +) +def test_section_headers( + test_id: str, + input_text: str, + expected_header: str, +) -> None: + """Test section header tokenization.""" + tokens = get_help_tokens(input_text) + # Section headers should be Generic.Subheading + # Strip newlines from token values (lexer may include trailing \n) + subheading_tokens = [ + v.strip() for t, v in tokens if t == "Token.Generic.Subheading" + ] + assert expected_header in subheading_tokens + + +# --- Full help output test --- + + +def test_full_help_output() -> None: + """Test full argparse -h output tokenization.""" + help_text = """\ +usage: vcspull sync [-h] [-c CONFIG] [-d DIRECTORY] + [--json | --ndjson | --table] + [repo-name] [path] + +positional arguments: + repo-name repository name filter + path path filter + +options: + -h, --help show this help message and exit + -c CONFIG, --config CONFIG + config file path + --json output as JSON +""" + tokens = get_help_tokens(help_text) + + # Check usage heading + assert ("Token.Generic.Heading", "usage:") in tokens + + # Check section headers + subheadings = [v for t, v in tokens if t == 
"Token.Generic.Subheading"] + assert "positional arguments:" in subheadings + assert "options:" in subheadings + + # Check options are recognized + assert ("Token.Name.Attribute", "-h") in tokens + assert ("Token.Name.Tag", "--help") in tokens + assert ("Token.Name.Tag", "--config") in tokens + assert ("Token.Name.Tag", "--json") in tokens + + # Check command/positional names + assert ("Token.Name.Label", "vcspull") in tokens + # Subcommands now use Name.Function per 30ea233 + assert ("Token.Name.Function", "sync") in tokens + + +# --- Real vcspull usage output test --- + + +def test_vcspull_sync_usage() -> None: + """Test real vcspull sync usage output tokenization.""" + usage_text = """\ +usage: vcspull sync [-h] [-c CONFIG] [-d DIRECTORY] + [--json | --ndjson | --table] [--color {auto,always,never}] + [--no-progress] [--verbose] + [repo-name] [path]""" + + tokens = get_usage_tokens(usage_text) + + expected = [ + ("Token.Generic.Heading", "usage:"), + ("Token.Name.Label", "vcspull"), + # Subcommands now use Name.Function per 30ea233 + ("Token.Name.Function", "sync"), + ("Token.Name.Attribute", "-h"), + ("Token.Name.Attribute", "-c"), + ("Token.Name.Variable", "CONFIG"), + ("Token.Name.Attribute", "-d"), + ("Token.Name.Variable", "DIRECTORY"), + ("Token.Name.Tag", "--json"), + ("Token.Name.Tag", "--ndjson"), + ("Token.Name.Tag", "--table"), + ("Token.Name.Tag", "--color"), + ("Token.Name.Tag", "--no-progress"), + ("Token.Name.Tag", "--verbose"), + # Optional positional args in brackets also use Name.Function per 30ea233 + ("Token.Name.Function", "repo-name"), + ("Token.Name.Function", "path"), + ] + + for expected_type, expected_value in expected: + assert (expected_type, expected_value) in tokens, ( + f"Expected ({expected_type}, {expected_value!r}) not in tokens" + ) + + # Check choices are properly tokenized + assert ("Token.Name.Constant", "auto") in tokens + assert ("Token.Name.Constant", "always") in tokens + assert ("Token.Name.Constant", "never") in tokens 
+ + +# --- tokenize_argparse helper function test --- + + +def test_tokenize_argparse_helper() -> None: + """Test the tokenize_argparse helper function.""" + result = tokenize_argparse("usage: cmd [-h]") + + assert result[0] == ("Token.Generic.Heading", "usage:") + assert ("Token.Name.Label", "cmd") in result + assert ("Token.Name.Attribute", "-h") in result + + +def test_tokenize_usage_helper() -> None: + """Test the tokenize_usage helper function.""" + result = tokenize_usage("usage: cmd [-h]") + + assert result[0] == ("Token.Generic.Heading", "usage:") + assert ("Token.Name.Label", "cmd") in result + assert ("Token.Name.Attribute", "-h") in result + + +# --- Lexer class selection tests --- + + +def test_argparse_lexer_usage_detection() -> None: + """Test ArgparseLexer handles usage lines correctly.""" + lexer = ArgparseLexer() + tokens = list(lexer.get_tokens("usage: cmd [-h]")) + token_types = [str(t) for t, v in tokens] + assert "Token.Generic.Heading" in token_types + + +def test_argparse_lexer_section_detection() -> None: + """Test ArgparseLexer handles section headers correctly.""" + lexer = ArgparseLexer() + tokens = list(lexer.get_tokens("positional arguments:")) + token_types = [str(t) for t, v in tokens] + assert "Token.Generic.Subheading" in token_types + + +def test_argparse_usage_lexer_standalone() -> None: + """Test ArgparseUsageLexer works standalone.""" + lexer = ArgparseUsageLexer() + tokens = list(lexer.get_tokens("usage: cmd [-h] --foo FILE")) + token_types = [str(t) for t, v in tokens] + + assert "Token.Generic.Heading" in token_types + assert "Token.Name.Label" in token_types # cmd + assert "Token.Name.Attribute" in token_types # -h + assert "Token.Name.Tag" in token_types # --foo + + +def test_argparse_help_lexer_multiline() -> None: + """Test ArgparseHelpLexer handles multiline help.""" + lexer = ArgparseHelpLexer() + help_text = """usage: cmd + +options: + -h help +""" + tokens = list(lexer.get_tokens(help_text)) + token_values = [v for t, 
v in tokens] + + assert "usage:" in token_values + assert "options:" in token_values or any( + "options:" in v for v in token_values if isinstance(v, str) + ) + + +def test_lowercase_metavar_with_underscores() -> None: + """Test lowercase metavars with underscores are fully captured. + + Regression test: previously `socket_name` was tokenized as `socket` + `_name`. + Example from CLI usage with underscore metavars. + """ + usage = "usage: prog [-L socket_name] [-S socket_path] [-f config_file]" + tokens = get_usage_tokens(usage) + + # All underscore metavars should be fully captured + assert ("Token.Name.Variable", "socket_name") in tokens + assert ("Token.Name.Variable", "socket_path") in tokens + assert ("Token.Name.Variable", "config_file") in tokens diff --git a/tests/test_cli.py b/tests/test_cli.py index faecac4..a51de42 100644 --- a/tests/test_cli.py +++ b/tests/test_cli.py @@ -81,6 +81,18 @@ class CommandLineTestFixture(t.NamedTuple): argv_args=["g --help"], expect_cmd=None, ), + CommandLineTestFixture( + test_id="g-version-inside-git-dir", + env=EnvFlag.Git, + argv_args=["g", "--version"], + expect_cmd=None, # Returns None after printing version + ), + CommandLineTestFixture( + test_id="g-version-short-inside-empty-dir", + env=EnvFlag.Empty, + argv_args=["g", "-V"], + expect_cmd=None, + ), ] @@ -120,3 +132,25 @@ def test_command_line( shell=True, stderr=subprocess.STDOUT, ) + + +def test_version_output(capsys: pytest.CaptureFixture[str]) -> None: + """Test that --version prints the correct version string.""" + from g import __version__, sys as gsys + + with patch.object(gsys, "argv", ["g", "--version"]): + result = run() + assert result is None + captured = capsys.readouterr() + assert f"g {__version__}" in captured.out + + +def test_version_short_output(capsys: pytest.CaptureFixture[str]) -> None: + """Test that -V prints the correct version string.""" + from g import __version__, sys as gsys + + with patch.object(gsys, "argv", ["g", "-V"]): + result = 
run() + assert result is None + captured = capsys.readouterr() + assert f"g {__version__}" in captured.out diff --git a/uv.lock b/uv.lock index 15f0792..775e674 100644 --- a/uv.lock +++ b/uv.lock @@ -386,6 +386,8 @@ dev = [ { name = "sphinx-inline-tabs" }, { name = "sphinxext-opengraph" }, { name = "sphinxext-rediraffe" }, + { name = "types-docutils" }, + { name = "types-pygments" }, ] docs = [ { name = "aafigure" }, @@ -450,6 +452,8 @@ dev = [ { name = "sphinx-inline-tabs" }, { name = "sphinxext-opengraph" }, { name = "sphinxext-rediraffe" }, + { name = "types-docutils" }, + { name = "types-pygments" }, ] docs = [ { name = "aafigure" }, @@ -1542,6 +1546,27 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/23/d1/136eb2cb77520a31e1f64cbae9d33ec6df0d78bdf4160398e86eec8a8754/tomli-2.4.0-py3-none-any.whl", hash = "sha256:1f776e7d669ebceb01dee46484485f43a4048746235e683bcdffacdf1fb4785a", size = 14477, upload-time = "2026-01-11T11:22:37.446Z" }, ] +[[package]] +name = "types-docutils" +version = "0.22.3.20251115" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/eb/d7/576ec24bf61a280f571e1f22284793adc321610b9bcfba1bf468cf7b334f/types_docutils-0.22.3.20251115.tar.gz", hash = "sha256:0f79ea6a7bd4d12d56c9f824a0090ffae0ea4204203eb0006392906850913e16", size = 56828, upload-time = "2025-11-15T02:59:57.371Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/9c/01/61ac9eb38f1f978b47443dc6fd2e0a3b0f647c2da741ddad30771f1b2b6f/types_docutils-0.22.3.20251115-py3-none-any.whl", hash = "sha256:c6e53715b65395d00a75a3a8a74e352c669bc63959e65a207dffaa22f4a2ad6e", size = 91951, upload-time = "2025-11-15T02:59:56.413Z" }, +] + +[[package]] +name = "types-pygments" +version = "2.19.0.20251121" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "types-docutils" }, +] +sdist = { url = 
"https://files.pythonhosted.org/packages/90/3b/cd650700ce9e26b56bd1a6aa4af397bbbc1784e22a03971cb633cdb0b601/types_pygments-2.19.0.20251121.tar.gz", hash = "sha256:eef114fde2ef6265365522045eac0f8354978a566852f69e75c531f0553822b1", size = 18590, upload-time = "2025-11-21T03:03:46.623Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/99/8a/9244b21f1d60dcc62e261435d76b02f1853b4771663d7ec7d287e47a9ba9/types_pygments-2.19.0.20251121-py3-none-any.whl", hash = "sha256:cb3bfde34eb75b984c98fb733ce4f795213bd3378f855c32e75b49318371bb25", size = 25674, upload-time = "2025-11-21T03:03:45.72Z" }, +] + [[package]] name = "typing-extensions" version = "4.15.0"