diff --git a/.github/workflows/bench.yml b/.github/workflows/bench.yml
new file mode 100644
index 0000000..ed8eb20
--- /dev/null
+++ b/.github/workflows/bench.yml
@@ -0,0 +1,61 @@
+name: Benchmarks
+
+on:
+  push:
+    branches: [main]
+  workflow_dispatch:
+
+permissions:
+  contents: write
+  deployments: write
+
+concurrency:
+  group: benchmarks
+  cancel-in-progress: false
+
+jobs:
+  benchmark:
+    name: Run Benchmarks
+    runs-on: blacksmith-8vcpu-ubuntu-2404
+    timeout-minutes: 60
+    steps:
+      - uses: actions/checkout@v4
+
+      - name: Install uv
+        uses: astral-sh/setup-uv@v5
+
+      - name: Set up Python
+        run: uv python install
+
+      - name: Install dependencies
+        run: uv sync
+
+      - name: Install Rust toolchain
+        uses: dtolnay/rust-toolchain@stable
+
+      - name: Build mesafs binary (release)
+        run: cargo build --release
+
+      - name: Run benchmarks
+        run: >-
+          uv run gitfs-bench
+          --mesafs-binary target/release/git-fs
+          --output-format github-action-benchmark
+          > bench-output.json
+        env:
+          MESA_TEST_API_KEY: ${{ secrets.MESA_TEST_API_KEY }}
+
+      - name: Store benchmark result
+        uses: benchmark-action/github-action-benchmark@v1
+        with:
+          tool: customSmallerIsBetter
+          output-file-path: bench-output.json
+          github-token: ${{ secrets.GITHUB_TOKEN }}
+          auto-push: true
+          gh-pages-branch: gh-pages
+          benchmark-data-dir-path: dev/bench
+          name: gitfs Benchmarks
+          alert-threshold: "150%"
+          comment-on-alert: true
+          fail-on-alert: false
+          summary-always: true
diff --git a/bench/__init__.py b/bench/__init__.py
new file mode 100644
index 0000000..1bf78c1
--- /dev/null
+++ b/bench/__init__.py
@@ -0,0 +1,55 @@
+"""Benchmark harness for gitfs."""
+
+from __future__ import annotations
+
+import importlib
+import pkgutil
+from typing import TYPE_CHECKING
+
+from bench.fixture import (
+    _REGISTRY,  # pyright: ignore[reportPrivateUsage]
+    BenchConfig,
+    BenchmarkResult,
+    bench,
+    bench_many,
+    git_bench,
+    mesafs_bench,
+)
+
+if TYPE_CHECKING:
+    from collections.abc import Callable
+
+__all__ = [
+    "BenchConfig",
+    "BenchmarkResult",
+    "bench",
+    "bench_many",
+    "collect_benchmarks",
+    "git_bench",
+    "mesafs_bench",
+    "run_benchmarks",
+]
+
+
+def collect_benchmarks() -> list[Callable[[BenchConfig], BenchmarkResult]]:
+    """Import all submodules of the bench package and return registered benchmarks.
+
+    Walking the package triggers ``@bench`` / ``@git_bench`` decorators,
+    which append to the internal registry. Returns a snapshot of the
+    registry at time of call.
+    """
+    import bench as _pkg
+
+    for info in pkgutil.walk_packages(_pkg.__path__, prefix=f"{_pkg.__name__}."):
+        importlib.import_module(info.name)
+
+    return list(_REGISTRY)
+
+
+def run_benchmarks(config: BenchConfig) -> list[tuple[str, BenchmarkResult]]:
+    """Collect and sequentially run all benchmarks.
+
+    Returns a list of ``(name, result)`` tuples in execution order.
+    """
+    benchmarks = collect_benchmarks()
+    return [(fn.__name__, fn(config)) for fn in benchmarks]
diff --git a/bench/__main__.py b/bench/__main__.py
new file mode 100644
index 0000000..d2cdb88
--- /dev/null
+++ b/bench/__main__.py
@@ -0,0 +1,74 @@
+"""CLI entry point for the benchmark harness."""
+
+from __future__ import annotations
+
+import json
+from dataclasses import asdict
+from typing import Any
+
+import click
+
+from bench import BenchConfig, BenchmarkResult, run_benchmarks
+from bench.util import warm_sudo
+
+
+def _to_github_action_benchmark(
+    results: list[tuple[str, BenchmarkResult]],
+) -> list[dict[str, Any]]:
+    """Transform benchmark results to ``github-action-benchmark`` JSON format.
+
+    Produces entries compatible with the ``customSmallerIsBetter`` format
+    expected by ``benchmark-action/github-action-benchmark``.
+    """
+    return [
+        {
+            "name": name,
+            "unit": "seconds",
+            "value": round(result.median_s, 6),
+            "range": str(round(result.stdev_s, 6)),
+            "extra": (
+                f"iterations: {result.iterations}\n"
+                f"min: {result.min_s:.6f}s\n"
+                f"max: {result.max_s:.6f}s\n"
+                f"mean: {result.mean_s:.6f}s"
+            ),
+        }
+        for name, result in results
+    ]
+
+
+@click.command()
+@click.option(
+    "--token",
+    envvar="MESA_TEST_API_KEY",
+    required=True,
+    help="API token for depot.mesa.dev",
+)
+@click.option(
+    "--mesafs-binary",
+    envvar="MESAFS_BINARY",
+    required=True,
+    help="Path to the mesafs (git-fs) binary.",
+)
+@click.option(
+    "--output-format",
+    type=click.Choice(["default", "github-action-benchmark"]),
+    default="default",
+    help="Output format for benchmark results.",
+)
+def main(token: str, mesafs_binary: str, output_format: str) -> None:
+    """Run all registered gitfs benchmarks and print JSON results."""
+    warm_sudo()
+    config = BenchConfig(token=token, mesafs_binary=mesafs_binary)
+    results = run_benchmarks(config)
+
+    if output_format == "github-action-benchmark":
+        output: list[dict[str, Any]] = _to_github_action_benchmark(results)
+    else:
+        output = [{"name": name, **asdict(result)} for name, result in results]
+
+    click.echo(json.dumps(output, indent=2))
+
+
+if __name__ == "__main__":
+    main()
diff --git a/bench/fixture.py b/bench/fixture.py
new file mode 100644
index 0000000..9b797d8
--- /dev/null
+++ b/bench/fixture.py
@@ -0,0 +1,340 @@
+"""Benchmark decorator and result type."""
+
+from __future__ import annotations
+
+import contextlib
+import functools
+import os
+import shutil
+import statistics
+import subprocess
+import tempfile
+import time
+from dataclasses import dataclass
+from pathlib import Path
+from typing import TYPE_CHECKING, Any
+
+from bench.ui import run_with_live_progress
+from bench.util import flush_disk_caches
+
+if TYPE_CHECKING:
+    from collections.abc import Callable, Generator
+
+
+@dataclass(frozen=True)
+class BenchConfig:
+    """Configuration passed to every benchmark at execution time."""
+
+    token: str
+    mesafs_binary: str
+
+
+@dataclass(frozen=True)
+class BenchmarkResult:
+    """Aggregated timing statistics from a benchmark run."""
+
+    iterations: int
+    min_s: float
+    max_s: float
+    mean_s: float
+    median_s: float
+    stdev_s: float
+
+
+_REGISTRY: list[Callable[[BenchConfig], BenchmarkResult]] = []
+
+
+def bench(  # noqa: PLR0913
+    prepare: Callable[..., Generator[Any]],
+    *,
+    setup: Callable[[BenchConfig], Generator[Any]] | None = None,
+    label: str | None = None,
+    min_rounds: int = 5,
+    max_rounds: int = 100,
+    cv_threshold: float = 0.05,
+) -> Callable[..., Any]:
+    """Register a benchmark whose setup is provided by *prepare*.
+
+    ``prepare`` receives a :class:`BenchConfig` and returns a generator
+    (context-manager style). It is entered before each iteration;
+    the yielded value is passed as the first positional argument.
+    Cleanup happens after each iteration completes (or raises).
+
+    When *setup* is provided it must also be a single-yield generator
+    accepting a :class:`BenchConfig`. It is entered **once** for the
+    entire benchmark run (before the first iteration) and its yielded
+    value is forwarded to *prepare* as a second argument. Cleanup
+    runs after all iterations complete (or on error). Use *setup* for
+    expensive one-time work (e.g. cloning a repository) and *prepare*
+    for cheap per-iteration work (e.g. flushing disk caches).
+
+    The decorated function's signature becomes ``fn(ctx) -> None``.
+    Timing is measured by the harness. Iterations continue until either
+    the coefficient of variation (stdev / mean) drops below
+    *cv_threshold*, or *max_rounds* is reached — whichever comes first.
+    At least *min_rounds* iterations are always executed.
+    """
+    _min_for_stdev = 2
+    if min_rounds < _min_for_stdev:
+        msg = f"min_rounds must be >= {_min_for_stdev}, got {min_rounds}"
+        raise ValueError(msg)
+    if max_rounds < min_rounds:
+        msg = f"max_rounds ({max_rounds}) must be >= min_rounds ({min_rounds})"
+        raise ValueError(msg)
+
+    resolved_label = label if label is not None else prepare.__qualname__
+
+    def decorator(
+        fn: Callable[..., None],
+    ) -> Callable[[BenchConfig], BenchmarkResult]:
+        @functools.wraps(fn)
+        def wrapper(config: BenchConfig) -> BenchmarkResult:
+            setup_gen = None
+            setup_ctx = None
+            if setup is not None:
+                setup_gen = setup(config)
+                setup_ctx = next(setup_gen)
+
+            try:
+                def run_one() -> float:
+                    gen = (
+                        prepare(config, setup_ctx)
+                        if setup is not None
+                        else prepare(config)
+                    )
+                    ctx = next(gen)
+                    try:
+                        t0 = time.perf_counter()
+                        fn(ctx)
+                        return time.perf_counter() - t0
+                    finally:
+                        with contextlib.suppress(StopIteration):
+                            next(gen)
+
+                timings = run_with_live_progress(
+                    wrapper.__name__,
+                    run_one,
+                    min_rounds=min_rounds,
+                    max_rounds=max_rounds,
+                    cv_threshold=cv_threshold,
+                )
+
+                return BenchmarkResult(
+                    iterations=len(timings),
+                    min_s=min(timings),
+                    max_s=max(timings),
+                    mean_s=statistics.mean(timings),
+                    median_s=statistics.median(timings),
+                    stdev_s=statistics.stdev(timings),
+                )
+            finally:
+                if setup_gen is not None:
+                    with contextlib.suppress(StopIteration):
+                        next(setup_gen)
+
+        wrapper.__name__ = f"{fn.__name__}[{resolved_label}]"
+        existing = {w.__name__ for w in _REGISTRY}
+        if wrapper.__name__ in existing:
+            msg = f"duplicate benchmark name: {wrapper.__name__}"
+            raise ValueError(msg)
+        _REGISTRY.append(wrapper)
+        return wrapper
+
+    return decorator
+
+
+def git_bench(
+    repo: str,
+    *,
+    flush_caches: bool = True,
+    min_rounds: int = 5,
+    max_rounds: int = 100,
+    cv_threshold: float = 0.05,
+) -> Callable[..., Any]:
+    """Clone *repo* once and pass the checkout path to each iteration.
+
+    The clone happens in ``setup`` (once per benchmark). Each iteration's
+    ``prepare`` only flushes disk caches (when enabled) so that every round
+    benchmarks against cold storage without re-cloning.
+
+    Usage::
+
+        @git_bench("mesa-dot-dev/gitfs")
+        def my_benchmark(repo_path: Path) -> None:
+            ...
+
+    When *flush_caches* is ``True`` (the default), kernel disk caches are
+    dropped before each iteration so that the benchmark body measures
+    cold-storage performance. Set to ``False`` to benchmark with warm caches.
+    """
+
+    def setup(config: BenchConfig) -> Generator[Path]:
+        url = f"https://{config.token}@depot.mesa.dev/{repo}.git"
+        with tempfile.TemporaryDirectory(
+            prefix=f"bench-{repo.replace('/', '-')}-",
+            ignore_cleanup_errors=True,
+        ) as tmp:
+            dest = Path(tmp) / repo.rsplit("/", maxsplit=1)[-1]
+            subprocess.run(
+                ["git", "clone", url, str(dest)],
+                check=True,
+                capture_output=True,
+            )
+            yield dest
+
+    def prepare(_config: BenchConfig, setup_ctx: Path) -> Generator[Path]:
+        if flush_caches:
+            flush_disk_caches()
+        yield setup_ctx
+
+    return bench(
+        prepare=prepare,
+        setup=setup,
+        label=f"gitbench:{repo}",
+        min_rounds=min_rounds,
+        max_rounds=max_rounds,
+        cv_threshold=cv_threshold,
+    )
+
+
+_MESAFS_READY_POLL_INTERVAL = 0.5
+_MESAFS_READY_TIMEOUT = 30
+
+
+def mesafs_bench(
+    repo: str,
+    *,
+    flush_caches: bool = True,
+    min_rounds: int = 5,
+    max_rounds: int = 100,
+    cv_threshold: float = 0.05,
+) -> Callable[..., Any]:
+    """Mount *repo* via mesafs and pass the mount path to the body.
+
+    Convenience wrapper around :func:`bench` that spawns the mesafs binary,
+    waits for the FUSE mount to become ready, and yields the repository
+    path inside the mount.
+
+    When *flush_caches* is ``True`` (the default), kernel disk caches are
+    dropped after the mount is ready so that the benchmark body measures
+    cold-storage performance. Set to ``False`` to benchmark with warm caches.
+
+    Usage::
+
+        @mesafs_bench("mesa-ci/planventure")
+        def my_benchmark(mount_path: Path) -> None:
+            ...
+    """
+
+    def prepare(config: BenchConfig) -> Generator[Path]:
+        org_name, repo_name = repo.rsplit("/", maxsplit=1)
+
+        dir_name = f"bench-mesafs-{repo.replace('/', '-')}"
+        base_dir = Path(tempfile.gettempdir()) / dir_name
+        if base_dir.exists():
+            shutil.rmtree(base_dir)
+        base_dir.mkdir()
+        mount_path = Path(base_dir) / "mnt"
+        mount_path.mkdir()
+        cache_path = Path(base_dir) / "cache"
+        cache_path.mkdir()
+        pid_path = Path(base_dir) / "git-fs.pid"
+
+        config_content = (
+            f'mount-point = "{mount_path}"\n'
+            f"uid = {os.getuid()}\n"
+            f"gid = {os.getgid()}\n"
+            f"\n"
+            f"[cache]\n"
+            f'path = "{cache_path}"\n'
+            f"\n"
+            f"[daemon]\n"
+            f'pid-file = "{pid_path}"\n'
+            f"\n"
+            f"[organizations.{org_name}]\n"
+            f'api-key = "{config.token}"\n'
+        )
+        config_file = Path(base_dir) / "config.toml"
+        config_file.write_text(config_content)
+
+        log_path = Path(base_dir) / "mesafs.log"
+        log_file = log_path.open("w")
+        proc = subprocess.Popen(
+            [config.mesafs_binary, "--config-path", str(config_file), "run"],
+            stdout=log_file,
+            stderr=subprocess.STDOUT,
+        )
+
+        try:
+            repo_path = mount_path / org_name / repo_name
+            deadline = time.monotonic() + _MESAFS_READY_TIMEOUT
+            while time.monotonic() < deadline:
+                if repo_path.is_dir():
+                    break
+                if proc.poll() is not None:
+                    log_file.close()
+                    logs = log_path.read_text()
+                    msg = (
+                        f"mesafs exited early with code {proc.returncode}\n"
+                        f"--- mesafs logs ---\n{logs}"
+                    )
+                    raise RuntimeError(msg)
+                time.sleep(_MESAFS_READY_POLL_INTERVAL)
+            else:
+                log_file.close()
+                logs = log_path.read_text()
+                msg = (
+                    f"mesafs mount not ready within {_MESAFS_READY_TIMEOUT}s\n"
+                    f"--- mesafs logs ---\n{logs}"
+                )
+                raise TimeoutError(msg)
+
+            if flush_caches:
+                flush_disk_caches()
+            yield repo_path
+        finally:
+            proc.terminate()
+            try:
+                proc.wait(timeout=10)
+            except subprocess.TimeoutExpired:
+                proc.kill()
+                proc.wait()
+            log_file.close()
+            shutil.rmtree(base_dir, ignore_errors=True)
+
+    return bench(
+        prepare=prepare,
+        label=f"mesafs:{repo}",
+        min_rounds=min_rounds,
+        max_rounds=max_rounds,
+        cv_threshold=cv_threshold,
+    )
+
+
+def bench_many(
+    decorators: list[Callable[..., Any]],
+) -> Callable[..., Any]:
+    """Apply multiple bench decorators to a single function body.
+
+    Each decorator in *decorators* is applied independently, registering
+    a separate benchmark with its own label. The decorated function
+    itself is returned unchanged (the last decorator's wrapper wins as
+    the return value, but all are registered in the global registry).
+
+    Usage::
+
+        @bench_many([
+            git_bench("mesa-ci/planventure"),
+            git_bench("foo/bar"),
+        ])
+        def bench_list_root(repo_path: Path) -> None:
+            list(repo_path.iterdir())
+    """
+
+    def outer(fn: Callable[..., Any]) -> Callable[..., Any]:
+        result = fn
+        for dec in decorators:
+            result = dec(fn)
+        return result
+
+    return outer
diff --git a/bench/sample.py b/bench/sample.py
new file mode 100644
index 0000000..7e8a497
--- /dev/null
+++ b/bench/sample.py
@@ -0,0 +1,26 @@
+"""Sample benchmark to validate the harness works end-to-end."""
+
+from __future__ import annotations
+
+from pathlib import Path
+
+from bench import bench_many, git_bench, mesafs_bench
+
+
+@bench_many([
+    #git_bench("mesa-ci/planventure"),
+    mesafs_bench("mesa-ci/planventure"),
+])
+def bench_list_root(repo_path: Path) -> None:
+    """Recursively list every path in the cloned repo."""
+    for path in Path(repo_path).rglob("*"):
+        _ = path
+
+
+@bench_many([
+    mesafs_bench("mesa-ci/planventure"),
+])
+def bench_list_root_mesafs(mount_path: Path) -> None:
+    """Recursively list every path via the mesafs FUSE mount."""
+    for path in Path(mount_path).rglob("*"):
+        _ = path
diff --git a/bench/ui.py b/bench/ui.py
new file mode 100644
index 0000000..09616c6
--- /dev/null
+++ b/bench/ui.py
@@ -0,0 +1,79 @@
+"""Live progress display for benchmark runs."""
+
+from __future__ import annotations
+
+import statistics
+from typing import TYPE_CHECKING
+
+from rich.console import Console
+from rich.progress import BarColumn, Progress, TextColumn
+
+if TYPE_CHECKING:
+    from collections.abc import Callable
+
+
+def run_with_live_progress(
+    name: str,
+    run_one: Callable[[], float],
+    *,
+    min_rounds: int,
+    max_rounds: int,
+    cv_threshold: float,
+) -> list[float]:
+    """Execute benchmark rounds with a live progress bar on stderr.
+
+    *run_one* is called once per round and must return the elapsed time
+    in seconds. The loop runs adaptively: at least *min_rounds*, then
+    continues until the coefficient of variation drops to *cv_threshold*
+    or *max_rounds* is reached.
+
+    Returns the collected timings.
+    """
+    console = Console(stderr=True)
+    timings: list[float] = []
+
+    progress = Progress(
+        TextColumn("{task.description}"),
+        BarColumn(),
+        TextColumn("{task.fields[stats]}"),
+        console=console,
+        transient=True,
+    )
+
+    with progress:
+        task = progress.add_task(name, total=max_rounds, stats="starting…")
+
+        for i in range(max_rounds):
+            elapsed = run_one()
+            timings.append(elapsed)
+            n = i + 1
+
+            mean = statistics.mean(timings)
+            cv = (
+                statistics.stdev(timings) / mean
+                if n > 1 and mean > 0
+                else float("inf")
+            )
+
+            converged = n >= min_rounds and cv <= cv_threshold
+
+            if converged:
+                status = "converged"
+            elif n >= max_rounds:
+                status = "max rounds"
+            else:
+                status = "running"
+
+            stats = (
+                f"round={n}/{max_rounds}"
+                f" last={elapsed * 1000:.2f}ms"
+                f" mean={mean * 1000:.2f}ms"
+                f" cv={cv:.2%}"
+                f" status={status}"
+            )
+            progress.update(task, completed=n, stats=stats)
+
+            if converged:
+                break
+
+    return timings
diff --git a/bench/util.py b/bench/util.py
new file mode 100644
index 0000000..da8fe78
--- /dev/null
+++ b/bench/util.py
@@ -0,0 +1,52 @@
+"""Benchmark utilities."""
+
+from __future__ import annotations
+
+import os
+import platform
+import subprocess
+import time
+
+
+def warm_sudo() -> None:
+    """Validate sudo credentials so later sudo calls are non-interactive.
+
+    Runs ``sudo -v`` which prompts for the password (if needed) and
+    refreshes the credential cache. Call this once before entering
+    a ``rich.live.Live`` context so the prompt is visible.
+    """
+    if os.geteuid() != 0:
+        subprocess.run(["sudo", "-v"], check=True)
+
+
+def flush_disk_caches() -> None:
+    """Drop kernel disk caches so benchmarks hit cold storage.
+
+    On macOS this runs ``sudo purge``. On Linux this runs
+    ``sync`` followed by ``echo 3 > /proc/sys/vm/drop_caches``.
+
+    If the current process is not running as root, ``sudo`` is used
+    and may prompt the user for their password.
+    """
+    system = platform.system()
+
+    match system:
+        case "Darwin":
+            cmd = ["purge"]
+            if os.geteuid() != 0:
+                cmd = ["sudo", *cmd]
+            subprocess.run(cmd, check=True)
+
+        case "Linux":
+            subprocess.run(["sync"], check=True)
+            cmd = ["sh", "-c", "echo 3 > /proc/sys/vm/drop_caches"]
+            if os.geteuid() != 0:
+                cmd = ["sudo", *cmd]
+            subprocess.run(cmd, check=True)
+
+        case _:
+            msg = f"flush_disk_caches is not supported on {system}"
+            raise NotImplementedError(msg)
+
+    # Add a little bit of delay to let the system stabilize
+    time.sleep(0.3)
diff --git a/pyproject.toml b/pyproject.toml
index ae2c46e..03b6bb2 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,9 +1,22 @@
 [project]
-name = "git-fs-tests"
+name = "git-fs"
 version = "0.0.0"
 description = "Integration tests for git-fs (Rust/FUSE). Not a Python package."
 requires-python = ">=3.14"
-dependencies = []
+dependencies = [
+    "click>=8.1",
+    "rich>=14",
+]
+
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
+
+[tool.hatch.build.targets.wheel]
+packages = ["bench"]
+
+[project.scripts]
+gitfs-bench = "bench.__main__:main"
 
 [dependency-groups]
 dev = [
@@ -40,6 +53,11 @@ ignore = [
     "S", # It's tests, no need for security
     "PLC0415", # RPyC teleport requires imports inside function body
 ]
+"bench/*" = [
+    "S", # No security lint in benchmarks
+    "T201", # Allow print()
+    "PLC0415", # Benchmark module imports inside function body
+]
 
 [tool.ruff.format]
 quote-style = "double"
@@ -48,7 +66,7 @@ indent-style = "space"
 [tool.pyright]
 pythonVersion = "3.14"
 typeCheckingMode = "strict"
-include = ["tests"]
+include = ["tests", "bench"]
 reportMissingTypeStubs = false
 reportUnknownMemberType = false
 reportUnknownVariableType = false
diff --git a/tests/test_git_bench_flush.py b/tests/test_git_bench_flush.py
new file mode 100644
index 0000000..93e347a
--- /dev/null
+++ b/tests/test_git_bench_flush.py
@@ -0,0 +1,101 @@
+"""Tests for the flush_caches parameter of git_bench."""
+
+from __future__ import annotations
+
+from pathlib import Path
+from unittest.mock import MagicMock, patch
+
+import pytest
+
+from bench.fixture import BenchConfig, _REGISTRY, git_bench
+
+
+@pytest.fixture(autouse=True)
+def _clean_registry():
+    """Snapshot and restore the benchmark registry around each test."""
+    snapshot = list(_REGISTRY)
+    yield
+    _REGISTRY.clear()
+    _REGISTRY.extend(snapshot)
+
+
+def test_git_bench_accepts_flush_caches_false():
+    """git_bench() should accept flush_caches=False without error."""
+
+    @git_bench("owner/repo", flush_caches=False)
+    def my_bench(repo_path: Path) -> None:  # noqa: ARG001
+        pass  # pragma: no cover
+
+
+def test_git_bench_accepts_flush_caches_true():
+    """git_bench() should accept flush_caches=True (explicit default)."""
+
+    @git_bench("owner/repo2", flush_caches=True)
+    def my_bench(repo_path: Path) -> None:  # noqa: ARG001
+        pass  # pragma: no cover
+
+
+def test_git_bench_default_flush_caches():
+    """git_bench() should work without specifying flush_caches (default True)."""
+
+    @git_bench("owner/repo3")
+    def my_bench(repo_path: Path) -> None:  # noqa: ARG001
+        pass  # pragma: no cover
+
+
+def test_flush_caches_true_calls_flush(tmp_path: Path) -> None:
+    """flush_disk_caches is called during prepare when flush_caches=True."""
+    with (
+        patch("bench.fixture.flush_disk_caches") as mock_flush,
+        patch("bench.fixture.subprocess.run"),
+        patch("bench.fixture.tempfile.TemporaryDirectory") as mock_tmpdir,
+        patch("bench.fixture.run_with_live_progress") as mock_progress,
+    ):
+        mock_tmpdir.return_value.__enter__ = MagicMock(return_value=str(tmp_path))
+        mock_tmpdir.return_value.__exit__ = MagicMock(return_value=False)
+
+        def call_run_one(
+            name: str,  # noqa: ARG001
+            run_one: object,
+            **kwargs: object,  # noqa: ARG001
+        ) -> list[float]:
+            # stdev requires >= 2 data points
+            return [run_one(), run_one()]  # type: ignore[operator]
+
+        mock_progress.side_effect = call_run_one
+
+        @git_bench("owner/flush-true-behavioral", flush_caches=True)
+        def my_bench(repo_path: Path) -> None:  # noqa: ARG001
+            pass
+
+        my_bench(BenchConfig(token="fake", mesafs_binary="/fake/bin"))
+        mock_flush.assert_called()
+
+
+def test_flush_caches_false_skips_flush(tmp_path: Path) -> None:
+    """flush_disk_caches is NOT called during prepare when flush_caches=False."""
+    with (
+        patch("bench.fixture.flush_disk_caches") as mock_flush,
+        patch("bench.fixture.subprocess.run"),
+        patch("bench.fixture.tempfile.TemporaryDirectory") as mock_tmpdir,
+        patch("bench.fixture.run_with_live_progress") as mock_progress,
+    ):
+        mock_tmpdir.return_value.__enter__ = MagicMock(return_value=str(tmp_path))
+        mock_tmpdir.return_value.__exit__ = MagicMock(return_value=False)
+
+        def call_run_one(
+            name: str,  # noqa: ARG001
+            run_one: object,
+            **kwargs: object,  # noqa: ARG001
+        ) -> list[float]:
+            # stdev requires >= 2 data points
+            return [run_one(), run_one()]  # type: ignore[operator]
+
+        mock_progress.side_effect = call_run_one
+
+        @git_bench("owner/flush-false-behavioral", flush_caches=False)
+        def my_bench(repo_path: Path) -> None:  # noqa: ARG001
+            pass
+
+        my_bench(BenchConfig(token="fake", mesafs_binary="/fake/bin"))
+        mock_flush.assert_not_called()
diff --git a/uv.lock b/uv.lock
index f7de7d0..464944d 100644
--- a/uv.lock
+++ b/uv.lock
@@ -36,6 +36,18 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/0a/4c/925909008ed5a988ccbb72dcc897407e5d6d3bd72410d69e051fc0c14647/charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f", size = 53402, upload-time = "2025-10-14T04:42:31.76Z" },
 ]
 
+[[package]]
+name = "click"
+version = "8.3.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "colorama", marker = "sys_platform == 'win32'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/3d/fa/656b739db8587d7b5dfa22e22ed02566950fbfbcdc20311993483657a5c0/click-8.3.1.tar.gz", hash = "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a", size = 295065, upload-time = "2025-11-15T20:45:42.706Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/98/78/01c019cdb5d6498122777c1a43056ebb3ebfeef2076d9d026bfe15583b2b/click-8.3.1-py3-none-any.whl", hash = "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6", size = 108274, upload-time = "2025-11-15T20:45:41.139Z" },
+]
+
 [[package]]
 name = "cloudpickle"
 version = "3.1.2"
@@ -78,9 +90,13 @@ wheels = [
 ]
 
 [[package]]
-name = "git-fs-tests"
+name = "git-fs"
 version = "0.0.0"
-source = { virtual = "." }
+source = { editable = "." }
+dependencies = [
+    { name = "click" },
+    { name = "rich" },
+]
 
 [package.dev-dependencies]
 dev = [
@@ -93,6 +109,10 @@ dev = [
 ]
 
 [package.metadata]
+requires-dist = [
+    { name = "click", specifier = ">=8.1" },
+    { name = "rich", specifier = ">=14" },
+]
 
 [package.metadata.requires-dev]
 dev = [
@@ -122,6 +142,27 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" },
 ]
 
+[[package]]
+name = "markdown-it-py"
+version = "4.0.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "mdurl" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/5b/f5/4ec618ed16cc4f8fb3b701563655a69816155e79e24a17b651541804721d/markdown_it_py-4.0.0.tar.gz", hash = "sha256:cb0a2b4aa34f932c007117b194e945bd74e0ec24133ceb5bac59009cda1cb9f3", size = 73070, upload-time = "2025-08-11T12:57:52.854Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/94/54/e7d793b573f298e1c9013b8c4dade17d481164aa517d1d7148619c2cedbf/markdown_it_py-4.0.0-py3-none-any.whl", hash = "sha256:87327c59b172c5011896038353a81343b6754500a08cd7a4973bb48c6d578147", size = 87321, upload-time = "2025-08-11T12:57:51.923Z" },
+]
+
+[[package]]
+name = "mdurl"
+version = "0.1.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/d6/54/cfe61301667036ec958cb99bd3efefba235e65cdeb9c84d24a8293ba1d90/mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba", size = 8729, upload-time = "2022-08-14T12:40:10.846Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/b3/38/89ba8ad64ae25be8de66a6d463314cf1eb366222074cfda9ee839c56a4b4/mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8", size = 9979, upload-time = "2022-08-14T12:40:09.779Z" },
+]
+
 [[package]]
 name = "nodeenv"
 version = "1.10.0"
@@ -274,6 +315,19 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/1e/db/4254e3eabe8020b458f1a747140d32277ec7a271daf1d235b70dc0b4e6e3/requests-2.32.5-py3-none-any.whl", hash = "sha256:2462f94637a34fd532264295e186976db0f5d453d1cdd31473c85a6a161affb6", size = 64738, upload-time = "2025-08-18T20:46:00.542Z" },
 ]
 
+[[package]]
+name = "rich"
+version = "14.3.3"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "markdown-it-py" },
+    { name = "pygments" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/b3/c6/f3b320c27991c46f43ee9d856302c70dc2d0fb2dba4842ff739d5f46b393/rich-14.3.3.tar.gz", hash = "sha256:b8daa0b9e4eef54dd8cf7c86c03713f53241884e814f4e2f5fb342fe520f639b", size = 230582, upload-time = "2026-02-19T17:23:12.474Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/14/25/b208c5683343959b670dc001595f2f3737e051da617f66c31f7c4fa93abc/rich-14.3.3-py3-none-any.whl", hash = "sha256:793431c1f8619afa7d3b52b2cdec859562b950ea0d4b6b505397612db8d5362d", size = 310458, upload-time = "2026-02-19T17:23:13.732Z" },
+]
+
 [[package]]
 name = "rpyc"
 version = "6.0.2"