79 changes: 79 additions & 0 deletions .github/workflows/ci.yml
@@ -94,3 +94,82 @@ jobs:
flags: unit
env_vars: OS
fail_ci_if_error: false

benchmark:
name: Benchmarks
needs: lint
runs-on: ubuntu-latest
timeout-minutes: 10
services:
redis:
image: redis/redis-stack:latest
ports:
- 6379:6379
options: >-
--health-cmd "redis-cli ping"
--health-interval 10s
--health-timeout 5s
--health-retries 5
steps:
- name: Checkout
uses: actions/checkout@v6
- name: Install uv
uses: astral-sh/setup-uv@v7
- name: Setup Python ${{ env.pythonversion }}
uses: actions/setup-python@v6
with:
python-version: ${{ env.pythonversion }}
- name: Install dependencies
run: uv sync
- name: Make sync version of library (redis_om)
run: make sync
- name: Run benchmarks
env:
REDIS_OM_URL: "redis://localhost:6379?decode_responses=True"
run: |
uv run pytest tests/test_benchmarks.py -v --benchmark-only --benchmark-json=benchmark-results.json
- name: Upload benchmark results
uses: actions/upload-artifact@v4
with:
name: benchmark-results
path: benchmark-results.json
- name: Display benchmark summary
run: |
# Generate the benchmark table
uv run python << 'PYTHON_SCRIPT' > benchmark-table.txt
          import json

          def fmt(us):
              # Choose an adaptive unit for a duration given in microseconds.
              if us < 1:
                  return f'{us * 1000:.0f}ns'
              if us < 1000:
                  return f'{us:.1f}us'
              return f'{us / 1000:.1f}ms'

          print('## Benchmark Results')
          print('')
          print('| Test | Mean | Min | Max | OPS |')
          print('|------|------|-----|-----|-----|')
          with open('benchmark-results.json') as f:
              data = json.load(f)
          for b in data['benchmarks']:
              name = b['name'].replace('test_', '')
              stats = b['stats']
              # pytest-benchmark reports times in seconds; convert to microseconds.
              mean, min_val, max_val = (stats[k] * 1e6 for k in ('mean', 'min', 'max'))
              print(f"| {name} | {fmt(mean)} | {fmt(min_val)} | {fmt(max_val)} | {stats['ops']:.0f}/s |")
PYTHON_SCRIPT
# Output to both logs and step summary
cat benchmark-table.txt
cat benchmark-table.txt >> $GITHUB_STEP_SUMMARY
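
For context, the summary script above hard-codes the shape of pytest-benchmark's --benchmark-json output. A trimmed, illustrative sample of that structure, covering only the fields the script reads (name, stats.min/max/mean/ops); the test name and numbers here are invented:

    # Illustrative sketch only -- not output from this PR's benchmark run.
    sample = {
        "benchmarks": [
            {
                "name": "test_save_hash_model",  # hypothetical test name
                "stats": {
                    "min": 1.2e-4,   # seconds
                    "max": 3.4e-3,   # seconds
                    "mean": 2.1e-4,  # seconds
                    "ops": 4761.0,   # operations per second
                },
            },
        ],
    }

pytest-benchmark reports timings in seconds, which is why the script multiplies by 1e6 before picking a ns/us/ms unit.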
7 changes: 7 additions & 0 deletions make_sync.py
@@ -54,9 +54,14 @@ def main():
additional_replacements=ADDITIONAL_REPLACEMENTS,
),
]
# Files to exclude from sync generation (benchmarks require special async handling)
excluded_files = {"test_benchmarks.py"}

filepaths = []
for root, _, filenames in os.walk(base_dir):
for filename in filenames:
if filename in excluded_files:
continue
if filename.rpartition(".")[-1] in (
"py",
"pyi",
@@ -107,5 +112,7 @@ def remove_run_async_call(match):
f.write(content)


if __name__ == "__main__":
main()
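
The exclusion above is needed because pytest-benchmark's benchmark fixture times a synchronous callable, so async tests have to drive an event loop themselves. A minimal sketch of that pattern, assuming a structure like the PR's tests (this is not the actual test file):

    # Hypothetical illustration of the "special async handling" noted above.
    import asyncio

    def test_async_roundtrip(benchmark):
        async def roundtrip():
            await asyncio.sleep(0)  # stand-in for an awaited redis_om operation

        # Each benchmark round runs the coroutine to completion on a fresh
        # event loop. make_sync.py's mechanical async-to-sync rewrite would
        # turn this wrapper into nonsense, hence the exclusion.
        benchmark(lambda: asyncio.run(roundtrip()))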
1 change: 1 addition & 0 deletions pyproject.toml
@@ -72,6 +72,7 @@ dev = [
"pre-commit>=4.3.0; python_version >= '3.10'",
"mkdocs>=1.6.1",
"mkdocs-material>=9.7.1",
"pytest-benchmark>=5.2.3",
]

[build-system]
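
One configuration detail from the benchmark job is worth spelling out: REDIS_OM_URL passes decode_responses through the URL query string. Assuming redis_om hands the URL to redis-py's from_url (which the URL format suggests), the option is parsed into a client keyword argument roughly like this:

    import redis

    # redis-py parses query-string options such as decode_responses from the
    # URL, so the benchmark clients receive str responses instead of bytes.
    r = redis.Redis.from_url("redis://localhost:6379?decode_responses=True")
    assert r.connection_pool.connection_kwargs["decode_responses"] is True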