Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
61 changes: 61 additions & 0 deletions .github/workflows/bench.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
# Continuous benchmarking workflow: builds the release mesafs binary, runs the
# Python benchmark harness, and publishes the results to the gh-pages
# benchmark dashboard via github-action-benchmark.
name: Benchmarks

on:
  # Benchmark every push to main; also allow manual runs from the Actions UI.
  push:
    branches: [main]
  workflow_dispatch:

permissions:
  # `contents: write` is required so github-action-benchmark can auto-push
  # result data to the gh-pages branch; `deployments: write` for the pages
  # deployment it triggers.
  contents: write
  deployments: write

concurrency:
  # Serialize benchmark runs (no cancellation) so results on the shared
  # gh-pages data file are appended in order and never race each other.
  group: benchmarks
  cancel-in-progress: false

jobs:
  benchmark:
    name: Run Benchmarks
    # NOTE(review): custom Blacksmith runner — presumably chosen for stable
    # performance characteristics; benchmarks on shared runners are noisy.
    runs-on: blacksmith-8vcpu-ubuntu-2404
    timeout-minutes: 60
    steps:
      - uses: actions/checkout@v4

      - name: Install uv
        uses: astral-sh/setup-uv@v5

      - name: Set up Python
        # Installs the Python version pinned by the project (uv reads it from
        # the repo configuration).
        run: uv python install

      - name: Install dependencies
        run: uv sync

      - name: Install Rust toolchain
        uses: dtolnay/rust-toolchain@stable

      - name: Build mesafs binary (release)
        # Release build: benchmark numbers from a debug binary would be useless.
        run: cargo build --release

      - name: Run benchmarks
        # Emits JSON in the customSmallerIsBetter schema consumed by the
        # "Store benchmark result" step below.
        run: >-
          uv run gitfs-bench
          --mesafs-binary target/release/git-fs
          --output-format github-action-benchmark
          > bench-output.json
        env:
          MESA_TEST_API_KEY: ${{ secrets.MESA_TEST_API_KEY }}

      - name: Store benchmark result
        uses: benchmark-action/github-action-benchmark@v1
        with:
          tool: customSmallerIsBetter
          output-file-path: bench-output.json
          github-token: ${{ secrets.GITHUB_TOKEN }}
          # Push the updated data set straight to the dashboard branch.
          auto-push: true
          gh-pages-branch: gh-pages
          benchmark-data-dir-path: dev/bench
          name: gitfs Benchmarks
          # Comment on the commit when a benchmark regresses by >= 1.5x,
          # but do not fail the workflow (fail-on-alert: false).
          alert-threshold: "150%"
          comment-on-alert: true
          fail-on-alert: false
          summary-always: true
55 changes: 55 additions & 0 deletions bench/__init__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
"""Benchmark harness for gitfs."""

from __future__ import annotations

import importlib
import pkgutil
from typing import TYPE_CHECKING

from bench.fixture import (
_REGISTRY, # pyright: ignore[reportPrivateUsage]
BenchConfig,
BenchmarkResult,
bench,
bench_many,
git_bench,
mesafs_bench,
)

if TYPE_CHECKING:
from collections.abc import Callable

__all__ = [
"BenchConfig",
"BenchmarkResult",
"bench",
"bench_many",
"collect_benchmarks",
"git_bench",
"mesafs_bench",
"run_benchmarks",
]


def collect_benchmarks() -> list[Callable[[BenchConfig], BenchmarkResult]]:
    """Import all submodules of the bench package and return registered benchmarks.

    Walking the package triggers ``@bench`` / ``@git_bench`` decorators,
    which append to the internal registry. Returns a snapshot of the
    registry at time of call.
    """
    # This module *is* the ``bench`` package, so its own ``__path__`` and
    # ``__name__`` identify it directly — no need to re-import ourselves
    # (the original ``import bench as _pkg`` was redundant).
    for info in pkgutil.walk_packages(__path__, prefix=f"{__name__}."):
        importlib.import_module(info.name)

    # Copy so callers cannot mutate the shared registry through the result.
    return list(_REGISTRY)


def run_benchmarks(config: BenchConfig) -> list[tuple[str, BenchmarkResult]]:
    """Collect and sequentially run all benchmarks.

    Returns a list of ``(name, result)`` tuples in execution order.
    """
    # Discover everything up front, then execute one at a time in order.
    discovered = collect_benchmarks()
    return [(benchmark.__name__, benchmark(config)) for benchmark in discovered]
74 changes: 74 additions & 0 deletions bench/__main__.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@
"""CLI entry point for the benchmark harness."""

from __future__ import annotations

import json
from dataclasses import asdict
from typing import Any

import click

from bench import BenchConfig, BenchmarkResult, run_benchmarks
from bench.util import warm_sudo


def _to_github_action_benchmark(
results: list[tuple[str, BenchmarkResult]],
) -> list[dict[str, Any]]:
"""Transform benchmark results to ``github-action-benchmark`` JSON format.
Produces entries compatible with the ``customSmallerIsBetter`` format
expected by ``benchmark-action/github-action-benchmark``.
"""
return [
{
"name": name,
"unit": "seconds",
"value": round(result.median_s, 6),
"range": str(round(result.stdev_s, 6)),
"extra": (
f"iterations: {result.iterations}\n"
f"min: {result.min_s:.6f}s\n"
f"max: {result.max_s:.6f}s\n"
f"mean: {result.mean_s:.6f}s"
),
}
for name, result in results
]


@click.command()
@click.option(
    "--token",
    envvar="MESA_TEST_API_KEY",
    required=True,
    help="API token for depot.mesa.dev",
)
@click.option(
    "--mesafs-binary",
    envvar="MESAFS_BINARY",
    required=True,
    help="Path to the mesafs (git-fs) binary.",
)
@click.option(
    "--output-format",
    type=click.Choice(["default", "github-action-benchmark"]),
    default="default",
    help="Output format for benchmark results.",
)
def main(token: str, mesafs_binary: str, output_format: str) -> None:
    """Run all registered gitfs benchmarks and print JSON results."""
    # Authenticate sudo once up front so prompts don't interrupt timed runs.
    warm_sudo()

    bench_config = BenchConfig(token=token, mesafs_binary=mesafs_binary)
    collected = run_benchmarks(bench_config)

    # Serialize either in the CI-consumable format or as raw dataclass dumps.
    payload: list[dict[str, Any]]
    if output_format == "github-action-benchmark":
        payload = _to_github_action_benchmark(collected)
    else:
        payload = [{"name": name, **asdict(result)} for name, result in collected]

    click.echo(json.dumps(payload, indent=2))


if __name__ == "__main__":
    main()
Loading
Loading