From 81174d7d323b4196b498c3b7292c20bc6fc6f91d Mon Sep 17 00:00:00 2001 From: Andrew Brookins Date: Mon, 26 Jan 2026 15:20:15 -0800 Subject: [PATCH 1/4] Add benchmark test suite for model operations --- make_sync.py | 7 + pyproject.toml | 1 + tests/test_benchmarks.py | 618 +++++++++++++++++++++++++++++++++++++++ uv.lock | 24 ++ 4 files changed, 650 insertions(+) create mode 100644 tests/test_benchmarks.py diff --git a/make_sync.py b/make_sync.py index 168910f2..b43733b6 100644 --- a/make_sync.py +++ b/make_sync.py @@ -54,9 +54,14 @@ def main(): additional_replacements=ADDITIONAL_REPLACEMENTS, ), ] + # Files to exclude from sync generation (benchmarks require special async handling) + excluded_files = {"test_benchmarks.py"} + filepaths = [] for root, _, filenames in os.walk(base_dir): for filename in filenames: + if filename in excluded_files: + continue if filename.rpartition(".")[-1] in ( "py", "pyi", @@ -107,5 +112,7 @@ def remove_run_async_call(match): f.write(content) + + if __name__ == "__main__": main() diff --git a/pyproject.toml b/pyproject.toml index 225c4294..586813bb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -72,6 +72,7 @@ dev = [ "pre-commit>=4.3.0; python_version >= '3.10'", "mkdocs>=1.6.1", "mkdocs-material>=9.7.1", + "pytest-benchmark>=5.2.3", ] [build-system] diff --git a/tests/test_benchmarks.py b/tests/test_benchmarks.py new file mode 100644 index 00000000..48bc3b5a --- /dev/null +++ b/tests/test_benchmarks.py @@ -0,0 +1,618 @@ +""" +Benchmarks for redis-om-python model instantiation and operations. + +These benchmarks document the current performance characteristics of the library. +Run with: pytest tests/test_benchmarks.py -v --benchmark-only + +Related to GitHub issue #640: HashModel instantiation performance. +""" + +import asyncio +from datetime import datetime +from typing import List, Optional + +import pytest +import pytest_asyncio +from pydantic import BaseModel as PydanticBaseModel + +from aredis_om import EmbeddedJsonModel, Field, HashModel, JsonModel, Migrator + +# Skip if pytest-benchmark is not installed +pytest.importorskip("pytest_benchmark") + + +# ============================================================================= +# Test Models - Simple (Pydantic baseline) +# ============================================================================= + + +class SimplePydanticModel(PydanticBaseModel): + """Plain Pydantic model for baseline comparison.""" + + name: str + age: int + + +class ComplexPydanticModel(PydanticBaseModel): + """Pydantic model with more fields for comparison.""" + + name: str + email: str + age: int + score: float + active: bool + tags: List[str] = [] + created_at: Optional[datetime] = None + metadata: Optional[dict] = None + + +# ============================================================================= +# Fixtures for Redis-connected models +# ============================================================================= + + +@pytest_asyncio.fixture +async def redis_models(redis): + """Create model classes with the test Redis connection.""" + loop = asyncio.get_event_loop() + + class SimpleHashModel(HashModel, index=True): + """Simple HashModel with minimal fields.""" + + name: str = Field(index=True) + age: int = Field(index=True, sortable=True) + + class Meta: + database = redis + + class SimpleJsonModel(JsonModel, index=True): + """Simple JsonModel with minimal fields.""" + + name: str = Field(index=True) + age: int = Field(index=True, sortable=True) + + class Meta: + database = redis + + class 
ComplexHashModel(HashModel, index=True): + """HashModel with multiple indexed fields.""" + + name: str = Field(index=True) + email: str = Field(index=True) + age: int = Field(index=True, sortable=True) + score: float = Field(index=True, sortable=True) + active: bool = Field(index=True) + + class Meta: + database = redis + + class EmbeddedAddress(EmbeddedJsonModel, index=True): + """Embedded model for nested JsonModel testing.""" + + street: str + city: str = Field(index=True) + zip_code: str + + class ComplexJsonModel(JsonModel, index=True): + """JsonModel with nested embedded model.""" + + name: str = Field(index=True) + email: str = Field(index=True) + age: int = Field(index=True, sortable=True) + address: EmbeddedAddress + tags: List[str] = [] + + class Meta: + database = redis + + return { + "SimpleHashModel": SimpleHashModel, + "SimpleJsonModel": SimpleJsonModel, + "ComplexHashModel": ComplexHashModel, + "ComplexJsonModel": ComplexJsonModel, + "EmbeddedAddress": EmbeddedAddress, + "loop": loop, + } + + +# ============================================================================= +# Instantiation Benchmarks +# ============================================================================= + + +class TestInstantiationBenchmarks: + """Benchmark model instantiation performance.""" + + def test_pydantic_simple_instantiation(self, benchmark): + """Baseline: Plain Pydantic model instantiation.""" + benchmark(SimplePydanticModel, name="Alice", age=30) + + def test_pydantic_complex_instantiation(self, benchmark): + """Baseline: Complex Pydantic model instantiation.""" + benchmark( + ComplexPydanticModel, + name="Alice", + email="alice@example.com", + age=30, + score=95.5, + active=True, + tags=["admin", "user"], + ) + + def test_hashmodel_simple_instantiation(self, benchmark, redis_models): + """HashModel with minimal fields.""" + SimpleHashModel = redis_models["SimpleHashModel"] + benchmark(SimpleHashModel, name="Alice", age=30) + + def test_hashmodel_complex_instantiation(self, benchmark, redis_models): + """HashModel with multiple indexed fields.""" + ComplexHashModel = redis_models["ComplexHashModel"] + benchmark( + ComplexHashModel, + name="Alice", + email="alice@example.com", + age=30, + score=95.5, + active=True, + ) + + def test_jsonmodel_simple_instantiation(self, benchmark, redis_models): + """JsonModel with minimal fields.""" + SimpleJsonModel = redis_models["SimpleJsonModel"] + benchmark(SimpleJsonModel, name="Alice", age=30) + + def test_jsonmodel_complex_instantiation(self, benchmark, redis_models): + """JsonModel with nested embedded model.""" + ComplexJsonModel = redis_models["ComplexJsonModel"] + EmbeddedAddress = redis_models["EmbeddedAddress"] + address = EmbeddedAddress( + street="123 Main St", city="Springfield", zip_code="12345" + ) + benchmark( + ComplexJsonModel, + name="Alice", + email="alice@example.com", + age=30, + address=address, + tags=["admin", "user"], + ) + + +# ============================================================================= +# Redis I/O Benchmarks (Save, Get, Query, Update, Batch) +# ============================================================================= +# These benchmarks are in the sync version only (tests_sync/test_benchmarks.py) +# because pytest-benchmark doesn't support async functions and we can't use +# run_until_complete() inside an already-running event loop. +# +# To run Redis I/O benchmarks: +# 1. make sync +# 2. 
pytest tests_sync/test_benchmarks.py -v --benchmark-only +# ============================================================================= + + +class TestSaveBenchmarks: + """Benchmark model save operations.""" + + @pytest.fixture(autouse=True) + async def setup_migrator(self, redis_models): + """Run migrator to create indexes.""" + await Migrator().run() + + def test_hashmodel_save(self, benchmark, redis_models): + """Save a single HashModel instance.""" + SimpleHashModel = redis_models["SimpleHashModel"] + loop = redis_models["loop"] + + async def save_model(): + model = SimpleHashModel(name="Alice", age=30) + await model.save() + return model + + benchmark.pedantic( + lambda: loop.run_until_complete(save_model()), rounds=100, iterations=1 + ) + + def test_jsonmodel_save(self, benchmark, redis_models): + """Save a single JsonModel instance.""" + SimpleJsonModel = redis_models["SimpleJsonModel"] + loop = redis_models["loop"] + + async def save_model(): + model = SimpleJsonModel(name="Alice", age=30) + await model.save() + return model + + benchmark.pedantic( + lambda: loop.run_until_complete(save_model()), rounds=100, iterations=1 + ) + + def test_jsonmodel_with_embedded_save(self, benchmark, redis_models): + """Save JsonModel with embedded document.""" + ComplexJsonModel = redis_models["ComplexJsonModel"] + EmbeddedAddress = redis_models["EmbeddedAddress"] + loop = redis_models["loop"] + + async def save_model(): + address = EmbeddedAddress( + street="123 Main St", city="Springfield", zip_code="12345" + ) + model = ComplexJsonModel( + name="Alice", + email="alice@example.com", + age=30, + address=address, + tags=["admin", "user"], + ) + await model.save() + return model + + benchmark.pedantic( + lambda: loop.run_until_complete(save_model()), rounds=100, iterations=1 + ) + + +# ============================================================================= +# Get/Retrieve Benchmarks +# ============================================================================= + + +class TestGetBenchmarks: + """Benchmark model retrieval operations.""" + + @pytest.fixture(autouse=True) + async def setup_migrator(self, redis_models): + """Run migrator to create indexes.""" + await Migrator().run() + + def test_hashmodel_get(self, benchmark, redis_models): + """Get a HashModel by primary key.""" + SimpleHashModel = redis_models["SimpleHashModel"] + loop = redis_models["loop"] + + # Setup: create and save model + async def setup(): + model = SimpleHashModel(name="Alice", age=30) + await model.save() + return model.pk + + pk = loop.run_until_complete(setup()) + + async def get_model(): + return await SimpleHashModel.get(pk) + + benchmark.pedantic( + lambda: loop.run_until_complete(get_model()), rounds=100, iterations=1 + ) + + def test_jsonmodel_get(self, benchmark, redis_models): + """Get a JsonModel by primary key.""" + SimpleJsonModel = redis_models["SimpleJsonModel"] + loop = redis_models["loop"] + + async def setup(): + model = SimpleJsonModel(name="Alice", age=30) + await model.save() + return model.pk + + pk = loop.run_until_complete(setup()) + + async def get_model(): + return await SimpleJsonModel.get(pk) + + benchmark.pedantic( + lambda: loop.run_until_complete(get_model()), rounds=100, iterations=1 + ) + + def test_jsonmodel_with_embedded_get(self, benchmark, redis_models): + """Get JsonModel with embedded document.""" + ComplexJsonModel = redis_models["ComplexJsonModel"] + EmbeddedAddress = redis_models["EmbeddedAddress"] + loop = redis_models["loop"] + + async def setup(): + address = 
EmbeddedAddress( + street="123 Main St", city="Springfield", zip_code="12345" + ) + model = ComplexJsonModel( + name="Alice", + email="alice@example.com", + age=30, + address=address, + tags=["admin", "user"], + ) + await model.save() + return model.pk + + pk = loop.run_until_complete(setup()) + + async def get_model(): + return await ComplexJsonModel.get(pk) + + benchmark.pedantic( + lambda: loop.run_until_complete(get_model()), rounds=100, iterations=1 + ) + + +# ============================================================================= +# Query/Find Benchmarks +# ============================================================================= + + +class TestQueryBenchmarks: + """Benchmark query operations.""" + + @pytest.fixture(autouse=True) + def setup_data(self, redis_models): + """Create test data for queries.""" + loop = redis_models["loop"] + + async def create_data(): + await Migrator().run() + + SimpleHashModel = redis_models["SimpleHashModel"] + ComplexJsonModel = redis_models["ComplexJsonModel"] + EmbeddedAddress = redis_models["EmbeddedAddress"] + + # Create 50 hash models + for i in range(50): + model = SimpleHashModel(name=f"User{i}", age=20 + i) + await model.save() + + # Create 50 json models with embedded addresses + for i in range(50): + address = EmbeddedAddress( + street=f"{i} Main St", city=f"City{i % 10}", zip_code=f"{10000 + i}" + ) + model = ComplexJsonModel( + name=f"User{i}", + email=f"user{i}@example.com", + age=20 + i, + address=address, + tags=["tag1", "tag2"] if i % 2 == 0 else ["tag3"], + ) + await model.save() + + loop.run_until_complete(create_data()) + + def test_hashmodel_find_all(self, benchmark, redis_models): + """Find all HashModel instances.""" + SimpleHashModel = redis_models["SimpleHashModel"] + loop = redis_models["loop"] + + async def find_all(): + return await SimpleHashModel.find().all() + + benchmark.pedantic( + lambda: loop.run_until_complete(find_all()), rounds=50, iterations=1 + ) + + def test_hashmodel_find_by_field(self, benchmark, redis_models): + """Find HashModel by indexed field.""" + SimpleHashModel = redis_models["SimpleHashModel"] + loop = redis_models["loop"] + + async def find_by_name(): + return await SimpleHashModel.find(SimpleHashModel.name == "User25").all() + + benchmark.pedantic( + lambda: loop.run_until_complete(find_by_name()), rounds=100, iterations=1 + ) + + def test_jsonmodel_find_all(self, benchmark, redis_models): + """Find all JsonModel instances.""" + ComplexJsonModel = redis_models["ComplexJsonModel"] + loop = redis_models["loop"] + + async def find_all(): + return await ComplexJsonModel.find().all() + + benchmark.pedantic( + lambda: loop.run_until_complete(find_all()), rounds=50, iterations=1 + ) + + def test_jsonmodel_find_by_field(self, benchmark, redis_models): + """Find JsonModel by indexed field.""" + ComplexJsonModel = redis_models["ComplexJsonModel"] + loop = redis_models["loop"] + + async def find_by_name(): + return await ComplexJsonModel.find(ComplexJsonModel.name == "User25").all() + + benchmark.pedantic( + lambda: loop.run_until_complete(find_by_name()), rounds=100, iterations=1 + ) + + def test_jsonmodel_find_by_embedded_field(self, benchmark, redis_models): + """Find JsonModel by embedded document field.""" + ComplexJsonModel = redis_models["ComplexJsonModel"] + loop = redis_models["loop"] + + async def find_by_city(): + return await ComplexJsonModel.find( + ComplexJsonModel.address.city == "City5" + ).all() + + benchmark.pedantic( + lambda: loop.run_until_complete(find_by_city()), rounds=100, 
iterations=1 + ) + + def test_jsonmodel_find_with_sort(self, benchmark, redis_models): + """Find JsonModel with sorting.""" + ComplexJsonModel = redis_models["ComplexJsonModel"] + loop = redis_models["loop"] + + async def find_sorted(): + return await ComplexJsonModel.find().sort_by("age").all() + + benchmark.pedantic( + lambda: loop.run_until_complete(find_sorted()), rounds=50, iterations=1 + ) + + def test_jsonmodel_find_with_pagination(self, benchmark, redis_models): + """Find JsonModel with pagination.""" + ComplexJsonModel = redis_models["ComplexJsonModel"] + loop = redis_models["loop"] + + async def find_paginated(): + return await ComplexJsonModel.find().page(offset=10, limit=10) + + benchmark.pedantic( + lambda: loop.run_until_complete(find_paginated()), rounds=100, iterations=1 + ) + + +# ============================================================================= +# Update Benchmarks +# ============================================================================= + + +class TestUpdateBenchmarks: + """Benchmark update operations.""" + + @pytest.fixture(autouse=True) + async def setup_migrator(self, redis_models): + """Run migrator to create indexes.""" + await Migrator().run() + + def test_hashmodel_update(self, benchmark, redis_models): + """Update a HashModel instance.""" + SimpleHashModel = redis_models["SimpleHashModel"] + loop = redis_models["loop"] + + async def setup(): + model = SimpleHashModel(name="Alice", age=30) + await model.save() + return model + + model = loop.run_until_complete(setup()) + counter = [0] + + async def update_model(): + counter[0] += 1 + return await model.update(age=30 + counter[0]) + + benchmark.pedantic( + lambda: loop.run_until_complete(update_model()), rounds=100, iterations=1 + ) + + def test_jsonmodel_update(self, benchmark, redis_models): + """Update a JsonModel instance.""" + SimpleJsonModel = redis_models["SimpleJsonModel"] + loop = redis_models["loop"] + + async def setup(): + model = SimpleJsonModel(name="Alice", age=30) + await model.save() + return model + + model = loop.run_until_complete(setup()) + counter = [0] + + async def update_model(): + counter[0] += 1 + return await model.update(age=30 + counter[0]) + + benchmark.pedantic( + lambda: loop.run_until_complete(update_model()), rounds=100, iterations=1 + ) + + def test_jsonmodel_update_embedded(self, benchmark, redis_models): + """Update embedded field in JsonModel.""" + ComplexJsonModel = redis_models["ComplexJsonModel"] + EmbeddedAddress = redis_models["EmbeddedAddress"] + loop = redis_models["loop"] + + async def setup(): + address = EmbeddedAddress( + street="123 Main St", city="Springfield", zip_code="12345" + ) + model = ComplexJsonModel( + name="Alice", + email="alice@example.com", + age=30, + address=address, + tags=["admin"], + ) + await model.save() + return model + + model = loop.run_until_complete(setup()) + counter = [0] + + async def update_embedded(): + counter[0] += 1 + return await model.update(address__city=f"City{counter[0]}") + + benchmark.pedantic( + lambda: loop.run_until_complete(update_embedded()), rounds=100, iterations=1 + ) + + +# ============================================================================= +# Batch Operation Benchmarks +# ============================================================================= + + +class TestBatchBenchmarks: + """Benchmark batch operations.""" + + @pytest.fixture(autouse=True) + async def setup_migrator(self, redis_models): + """Run migrator to create indexes.""" + await Migrator().run() + + def 
test_hashmodel_add_many(self, benchmark, redis_models): + """Save multiple HashModel instances using add().""" + SimpleHashModel = redis_models["SimpleHashModel"] + loop = redis_models["loop"] + + async def add_many(): + models = [SimpleHashModel(name=f"User{i}", age=20 + i) for i in range(10)] + await SimpleHashModel.add(models) + return models + + benchmark.pedantic( + lambda: loop.run_until_complete(add_many()), rounds=50, iterations=1 + ) + + def test_jsonmodel_add_many(self, benchmark, redis_models): + """Save multiple JsonModel instances using add().""" + SimpleJsonModel = redis_models["SimpleJsonModel"] + loop = redis_models["loop"] + + async def add_many(): + models = [SimpleJsonModel(name=f"User{i}", age=20 + i) for i in range(10)] + await SimpleJsonModel.add(models) + return models + + benchmark.pedantic( + lambda: loop.run_until_complete(add_many()), rounds=50, iterations=1 + ) + + def test_jsonmodel_with_embedded_add_many(self, benchmark, redis_models): + """Save multiple JsonModels with embedded documents using add().""" + ComplexJsonModel = redis_models["ComplexJsonModel"] + EmbeddedAddress = redis_models["EmbeddedAddress"] + loop = redis_models["loop"] + + async def add_many(): + models = [] + for i in range(10): + address = EmbeddedAddress( + street=f"{i} Main St", city=f"City{i}", zip_code=f"{10000 + i}" + ) + model = ComplexJsonModel( + name=f"User{i}", + email=f"user{i}@example.com", + age=20 + i, + address=address, + tags=["tag1", "tag2"], + ) + models.append(model) + await ComplexJsonModel.add(models) + return models + + benchmark.pedantic( + lambda: loop.run_until_complete(add_many()), rounds=50, iterations=1 + ) diff --git a/uv.lock b/uv.lock index c80f1bc7..8b8bd016 100644 --- a/uv.lock +++ b/uv.lock @@ -1672,6 +1672,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/8e/37/efad0257dc6e593a18957422533ff0f87ede7c9c6ea010a2177d738fb82f/pure_eval-0.2.3-py3-none-any.whl", hash = "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0", size = 11842, upload-time = "2024-07-21T12:58:20.04Z" }, ] +[[package]] +name = "py-cpuinfo" +version = "9.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/37/a8/d832f7293ebb21690860d2e01d8115e5ff6f2ae8bbdc953f0eb0fa4bd2c7/py-cpuinfo-9.0.0.tar.gz", hash = "sha256:3cdbbf3fac90dc6f118bfd64384f309edeadd902d7c8fb17f02ffa1fc3f49690", size = 104716, upload-time = "2022-10-25T20:38:06.303Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e0/a9/023730ba63db1e494a271cb018dcd361bd2c917ba7004c3e49d5daf795a2/py_cpuinfo-9.0.0-py3-none-any.whl", hash = "sha256:859625bc251f64e21f077d099d4162689c762b5d6a4c3c97553d56241c9674d5", size = 22335, upload-time = "2022-10-25T20:38:27.636Z" }, +] + [[package]] name = "pycodestyle" version = "2.14.0" @@ -1912,6 +1921,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/e5/35/f8b19922b6a25bc0880171a2f1a003eaeb93657475193ab516fd87cac9da/pytest_asyncio-1.3.0-py3-none-any.whl", hash = "sha256:611e26147c7f77640e6d0a92a38ed17c3e9848063698d5c93d5aa7aa11cebff5", size = 15075, upload-time = "2025-11-10T16:07:45.537Z" }, ] +[[package]] +name = "pytest-benchmark" +version = "5.2.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "py-cpuinfo" }, + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/24/34/9f732b76456d64faffbef6232f1f9dbec7a7c4999ff46282fa418bd1af66/pytest_benchmark-5.2.3.tar.gz", hash = 
"sha256:deb7317998a23c650fd4ff76e1230066a76cb45dcece0aca5607143c619e7779", size = 341340, upload-time = "2025-11-09T18:48:43.215Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/33/29/e756e715a48959f1c0045342088d7ca9762a2f509b945f362a316e9412b7/pytest_benchmark-5.2.3-py3-none-any.whl", hash = "sha256:bc839726ad20e99aaa0d11a127445457b4219bdb9e80a1afc4b51da7f96b0803", size = 45255, upload-time = "2025-11-09T18:48:39.765Z" }, +] + [[package]] name = "pytest-cov" version = "7.0.0" @@ -2130,6 +2152,7 @@ dev = [ { name = "pre-commit" }, { name = "pytest" }, { name = "pytest-asyncio" }, + { name = "pytest-benchmark" }, { name = "pytest-cov" }, { name = "pytest-xdist" }, { name = "tox" }, @@ -2174,6 +2197,7 @@ dev = [ { name = "pre-commit", marker = "python_full_version >= '3.10'", specifier = ">=4.3.0" }, { name = "pytest", specifier = ">=8.0.2,<10.0.0" }, { name = "pytest-asyncio", specifier = ">=0.24,<1.4" }, + { name = "pytest-benchmark", specifier = ">=5.2.3" }, { name = "pytest-cov", specifier = ">=5,<8" }, { name = "pytest-xdist", specifier = ">=3.1.0" }, { name = "tox", specifier = ">=4.14.1" }, From 8df07821a49b7a0856e378a0ab484f20b36a2b5f Mon Sep 17 00:00:00 2001 From: Andrew Brookins Date: Mon, 26 Jan 2026 15:33:03 -0800 Subject: [PATCH 2/4] Add benchmark job to CI with summary reporting --- .github/workflows/ci.yml | 75 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 75 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index a1b51b7a..993e6992 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -94,3 +94,78 @@ jobs: flags: unit env_vars: OS fail_ci_if_error: false + + benchmark: + name: Benchmarks + needs: lint + runs-on: ubuntu-latest + timeout-minutes: 10 + services: + redis: + image: redis/redis-stack:latest + ports: + - 6379:6379 + options: >- + --health-cmd "redis-cli ping" + --health-interval 10s + --health-timeout 5s + --health-retries 5 + steps: + - name: Checkout + uses: actions/checkout@v6 + - name: Install uv + uses: astral-sh/setup-uv@v7 + - name: Setup Python ${{ env.pythonversion }} + uses: actions/setup-python@v6 + with: + python-version: ${{ env.pythonversion }} + - name: Install dependencies + run: uv sync + - name: Make sync version of library (redis_om) + run: make sync + - name: Run benchmarks + env: + REDIS_OM_URL: "redis://localhost:6379?decode_responses=True" + run: | + uv run pytest tests/test_benchmarks.py -v --benchmark-only --benchmark-json=benchmark-results.json + - name: Upload benchmark results + uses: actions/upload-artifact@v4 + with: + name: benchmark-results + path: benchmark-results.json + - name: Display benchmark summary + run: | + echo "## Benchmark Results" >> $GITHUB_STEP_SUMMARY + echo "" >> $GITHUB_STEP_SUMMARY + echo "| Test | Mean | Min | Max | OPS |" >> $GITHUB_STEP_SUMMARY + echo "|------|------|-----|-----|-----|" >> $GITHUB_STEP_SUMMARY + uv run python -c " + import json + with open('benchmark-results.json') as f: + data = json.load(f) + for b in data['benchmarks']: + name = b['name'].replace('test_', '') + mean = b['stats']['mean'] * 1e6 # convert to microseconds + min_val = b['stats']['min'] * 1e6 + max_val = b['stats']['max'] * 1e6 + ops = b['stats']['ops'] + if mean < 1: + mean_str = f'{mean*1000:.0f}ns' + elif mean < 1000: + mean_str = f'{mean:.1f}μs' + else: + mean_str = f'{mean/1000:.1f}ms' + if min_val < 1: + min_str = f'{min_val*1000:.0f}ns' + elif min_val < 1000: + min_str = f'{min_val:.1f}μs' + else: + min_str = f'{min_val/1000:.1f}ms' + if max_val < 1: 
+ max_str = f'{max_val*1000:.0f}ns' + elif max_val < 1000: + max_str = f'{max_val:.1f}μs' + else: + max_str = f'{max_val/1000:.1f}ms' + print(f'| {name} | {mean_str} | {min_str} | {max_str} | {ops:.0f}/s |') + " >> $GITHUB_STEP_SUMMARY From ba42fd2eca841f3a1d3621dbe7d13de0dec5fdac Mon Sep 17 00:00:00 2001 From: Andrew Brookins Date: Mon, 26 Jan 2026 15:46:38 -0800 Subject: [PATCH 3/4] Fix benchmark summary output to GitHub step summary --- .github/workflows/ci.yml | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 993e6992..dbddb72c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -135,11 +135,13 @@ jobs: path: benchmark-results.json - name: Display benchmark summary run: | - echo "## Benchmark Results" >> $GITHUB_STEP_SUMMARY - echo "" >> $GITHUB_STEP_SUMMARY - echo "| Test | Mean | Min | Max | OPS |" >> $GITHUB_STEP_SUMMARY - echo "|------|------|-----|-----|-----|" >> $GITHUB_STEP_SUMMARY - uv run python -c " + cat >> $GITHUB_STEP_SUMMARY << 'EOF' + ## Benchmark Results + + | Test | Mean | Min | Max | OPS | + |------|------|-----|-----|-----| + EOF + uv run python << 'PYTHON_SCRIPT' >> $GITHUB_STEP_SUMMARY import json with open('benchmark-results.json') as f: data = json.load(f) @@ -152,20 +154,20 @@ jobs: if mean < 1: mean_str = f'{mean*1000:.0f}ns' elif mean < 1000: - mean_str = f'{mean:.1f}μs' + mean_str = f'{mean:.1f}us' else: mean_str = f'{mean/1000:.1f}ms' if min_val < 1: min_str = f'{min_val*1000:.0f}ns' elif min_val < 1000: - min_str = f'{min_val:.1f}μs' + min_str = f'{min_val:.1f}us' else: min_str = f'{min_val/1000:.1f}ms' if max_val < 1: max_str = f'{max_val*1000:.0f}ns' elif max_val < 1000: - max_str = f'{max_val:.1f}μs' + max_str = f'{max_val:.1f}us' else: max_str = f'{max_val/1000:.1f}ms' print(f'| {name} | {mean_str} | {min_str} | {max_str} | {ops:.0f}/s |') - " >> $GITHUB_STEP_SUMMARY + PYTHON_SCRIPT From 443060d7b41146f5da61ed6fff0311413fe452bb Mon Sep 17 00:00:00 2001 From: Andrew Brookins Date: Mon, 26 Jan 2026 15:53:12 -0800 Subject: [PATCH 4/4] Output benchmark table to both logs and step summary --- .github/workflows/ci.yml | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index dbddb72c..348a8cb7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -135,14 +135,13 @@ jobs: path: benchmark-results.json - name: Display benchmark summary run: | - cat >> $GITHUB_STEP_SUMMARY << 'EOF' - ## Benchmark Results - - | Test | Mean | Min | Max | OPS | - |------|------|-----|-----|-----| - EOF - uv run python << 'PYTHON_SCRIPT' >> $GITHUB_STEP_SUMMARY + # Generate the benchmark table + uv run python << 'PYTHON_SCRIPT' > benchmark-table.txt import json + print("## Benchmark Results") + print("") + print("| Test | Mean | Min | Max | OPS |") + print("|------|------|-----|-----|-----|") with open('benchmark-results.json') as f: data = json.load(f) for b in data['benchmarks']: @@ -171,3 +170,6 @@ jobs: max_str = f'{max_val/1000:.1f}ms' print(f'| {name} | {mean_str} | {min_str} | {max_str} | {ops:.0f}/s |') PYTHON_SCRIPT + # Output to both logs and step summary + cat benchmark-table.txt + cat benchmark-table.txt >> $GITHUB_STEP_SUMMARY
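
---

Reviewer note (not part of the patch series above): the benchmark CI job uploads `benchmark-results.json` as an artifact, so two runs can be compared offline. The sketch below is a minimal comparison script, assuming the same pytest-benchmark JSON layout the summary step already parses (`benchmarks[].name` and `benchmarks[].stats.mean`); the `before.json` / `after.json` filenames are placeholders for two downloaded artifacts, not files produced by this PR.

```python
#!/usr/bin/env python3
"""Compare two pytest-benchmark JSON artifacts (e.g. from two CI runs).

Usage: python compare_benchmarks.py before.json after.json
Assumes the layout produced by --benchmark-json, as read by the CI summary step.
"""
import json
import sys


def load_means(path):
    # Map benchmark name -> mean runtime in seconds.
    with open(path) as f:
        data = json.load(f)
    return {b["name"]: b["stats"]["mean"] for b in data["benchmarks"]}


def main():
    before, after = load_means(sys.argv[1]), load_means(sys.argv[2])
    print(f"{'benchmark':<50} {'before':>12} {'after':>12} {'delta':>8}")
    for name in sorted(before.keys() & after.keys()):
        b, a = before[name], after[name]
        delta = (a - b) / b * 100  # percent change relative to the "before" run
        print(f"{name:<50} {b * 1e6:>10.1f}us {a * 1e6:>10.1f}us {delta:>+7.1f}%")


if __name__ == "__main__":
    main()
```

For local iteration, pytest-benchmark's built-in run storage (`--benchmark-autosave` plus `--benchmark-compare`) covers the same need without a separate script; the snippet above is only for diffing the JSON artifacts that this workflow uploads.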