Skip to content

Commit 90440d7

Browse files
authored
chore: update benchmark result (#103)
* chore: update benchmark status * fix: update benchmark result link * docs: update documentation
1 parent 5454502 commit 90440d7

File tree

8 files changed

+73
-57
lines changed

8 files changed

+73
-57
lines changed

.github/workflows/benchmark.yml

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -8,12 +8,12 @@ jobs:
88
runs-on: ubuntu-latest
99
steps:
1010
- name: Checkout code
11-
uses: actions/checkout@v2
11+
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5
1212

1313
- name: Set up Python
14-
uses: actions/setup-python@v2
14+
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 #v5
1515
with:
16-
python-version: '3.8'
16+
python-version: '3.13'
1717

1818
- name: Install dependencies
1919
run: |

.gitignore

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@ result.txt
1313
testing/main.c
1414
*/*compile_commands.json
1515
testing/benchmark_results.txt
16-
testing/test-examples/*
16+
testing/examples/*
1717

1818
# Ignore Python wheel packages (clang-format, clang-tidy)
1919
clang-tidy-1*

README.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -195,6 +195,8 @@ repos:
195195
| Supports passing code string | ✅ via `--style` | ❌ |
196196
| Verbose output | ✅ via `--verbose` | ❌ |
197197

198+
> [!TIP]
199+
> In most cases, there is no significant performance difference between `cpp-linter-hooks` and `mirrors-clang-format`. See the [benchmark results](testing/benchmark.md) for details.
198200

199201
## Contributing
200202

docs/benchmark.md

Lines changed: 0 additions & 15 deletions
This file was deleted.

testing/README.md

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6,3 +6,9 @@
66
pre-commit try-repo ./.. clang-format --verbose --all-files
77
pre-commit try-repo ./.. clang-tidy --verbose --all-files
88
```
9+
10+
## Benchmark
11+
12+
```bash
13+
python3 testing/benchmark_hooks.py
14+
```

testing/benchmark.md

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
1+
# Benchmarking
2+
3+
This document outlines the benchmarking process for comparing the performance of cpp-linter-hooks and mirrors-clang-format.
4+
5+
> Details about test performance can be found at: [![CodSpeed Badge](https://img.shields.io/endpoint?url=https://codspeed.io/badge.json)](https://codspeed.io/cpp-linter/cpp-linter-hooks)
6+
7+
## Running the Benchmark
8+
9+
```bash
10+
python3 testing/benchmark_hooks.py
11+
```
12+
13+
## Results
14+
15+
```bash
16+
# Updated on 2025-09-02
17+
Benchmark Results:
18+
19+
Hook | Avg (s) | Std (s) | Min (s) | Max (s) | Runs
20+
---------------------+------------------+------------------+------------------+------------------+-----------------
21+
mirrors-clang-format | 0.116 | 0.003 | 0.113 | 0.118 | 5
22+
cpp-linter-hooks | 0.114 | 0.003 | 0.109 | 0.117 | 5
23+
24+
Results saved to testing/benchmark_results.txt
25+
```

testing/benchmark_hooks.py

Lines changed: 28 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -8,54 +8,51 @@
88
Requirements:
99
- pre-commit must be installed and available in PATH
1010
- Two config files:
11-
- testing/pre-commit-config-cpp-linter-hooks.yaml
12-
- testing/pre-commit-config-mirrors-clang-format.yaml
13-
- Target files: testing/main.c (or adjust as needed)
11+
- testing/cpp-linter-hooks.yaml
12+
- testing/mirrors-clang-format.yaml
13+
- Target files: testing/examples/*.c (or adjust as needed)
1414
"""
1515

1616
import os
1717
import subprocess
1818
import time
1919
import statistics
20-
import glob
2120

2221
HOOKS = [
23-
{
24-
"name": "cpp-linter-hooks",
25-
"config": "testing/benchmark_hook_1.yaml",
26-
},
2722
{
2823
"name": "mirrors-clang-format",
2924
"config": "testing/benchmark_hook_2.yaml",
3025
},
26+
{
27+
"name": "cpp-linter-hooks",
28+
"config": "testing/benchmark_hook_1.yaml",
29+
},
3130
]
3231

33-
# Automatically find all C/C++ files in testing/ (and optionally src/, include/)
34-
TARGET_FILES = glob.glob("testing/test-examples/*.c", recursive=True)
35-
3632
REPEATS = 5
3733
RESULTS_FILE = "testing/benchmark_results.txt"
3834

3935

40-
def git_clone():
36+
def prepare_code():
4137
try:
38+
subprocess.run(["rm", "-rf", "testing/examples"], check=True)
4239
subprocess.run(
4340
[
4441
"git",
4542
"clone",
4643
"--depth",
4744
"1",
4845
"https://github.com/gouravthakur39/beginners-C-program-examples.git",
49-
"testing/test-examples",
46+
"testing/examples",
5047
],
5148
check=True,
5249
)
5350
except subprocess.CalledProcessError:
5451
pass
5552

5653

57-
def run_hook(config, files):
58-
cmd = ["pre-commit", "run", "--config", config, "--files"] + files
54+
def run_hook(config):
55+
cmd = ["pre-commit", "run", "--config", config, "--all-files"]
5956
start = time.perf_counter()
6057
try:
6158
subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
@@ -66,30 +63,16 @@ def run_hook(config, files):
6663
return end - start
6764

6865

69-
def safe_git_restore(files):
70-
# Only restore files tracked by git
71-
tracked = []
72-
for f in files:
73-
result = subprocess.run(
74-
["git", "ls-files", "--error-unmatch", f],
75-
stdout=subprocess.PIPE,
76-
stderr=subprocess.PIPE,
77-
)
78-
if result.returncode == 0:
79-
tracked.append(f)
80-
if tracked:
81-
subprocess.run(["git", "restore"] + tracked)
82-
83-
8466
def benchmark():
8567
results = {}
68+
os.chdir("testing/examples")
8669
for hook in HOOKS:
8770
times = []
8871
print(f"\nBenchmarking {hook['name']}...")
8972
for i in range(REPEATS):
90-
safe_git_restore(TARGET_FILES)
73+
prepare_code()
9174
subprocess.run(["pre-commit", "clean"])
92-
t = run_hook(hook["config"], TARGET_FILES)
75+
t = run_hook(hook["config"])
9376
print(f" Run {i + 1}: {t:.3f} seconds")
9477
times.append(t)
9578
results[hook["name"]] = times
@@ -132,20 +115,27 @@ def report(results):
132115
f.write(line + "\n")
133116
print(f"\nResults saved to {RESULTS_FILE}")
134117

135-
# Write to GitHub Actions summary if available
118+
# Write to GitHub Actions summary
136119
summary_path = os.environ.get("GITHUB_STEP_SUMMARY")
137120
if summary_path:
138121
with open(summary_path, "a") as f:
139122
f.write("## Benchmark Results\n\n")
140-
f.write(header_row + "\n")
141-
f.write("-+-".join("-" * w for w in col_widths) + "\n")
142-
for line in lines:
143-
f.write(line + "\n")
123+
# Markdown table header
124+
md_header = "| " + " | ".join(headers) + " |\n"
125+
md_sep = "|" + "|".join(["-" * (w + 2) for w in col_widths]) + "|\n"
126+
f.write(md_header)
127+
f.write(md_sep)
128+
for name, times in results.items():
129+
avg = statistics.mean(times)
130+
std = statistics.stdev(times) if len(times) > 1 else 0.0
131+
min_t = min(times)
132+
max_t = max(times)
133+
md_row = f"| {name} | {avg:.3f} | {std:.3f} | {min_t:.3f} | {max_t:.3f} | {len(times)} |\n"
134+
f.write(md_row)
144135
f.write("\n")
145136

146137

147138
def main():
148-
git_clone()
149139
results = benchmark()
150140
report(results)
151141

testing/benchmark_results.txt

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,8 @@
1+
Benchmark Results:
2+
3+
Hook | Avg (s) | Std (s) | Min (s) | Max (s) | Runs
4+
---------------------+------------------+------------------+------------------+------------------+-----------------
5+
mirrors-clang-format | 0.116 | 0.003 | 0.113 | 0.118 | 5
6+
cpp-linter-hooks | 0.114 | 0.003 | 0.109 | 0.117 | 5
7+
8+
Results saved to testing/benchmark_results.txt

0 commit comments

Comments
 (0)