6 changes: 3 additions & 3 deletions .github/workflows/benchmark.yml
@@ -8,12 +8,12 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 #v5

- name: Set up Python
uses: actions/setup-python@v2
uses: actions/setup-python@a26af69be951a213d495a4c3e4e4022e16d87065 #v5
with:
python-version: '3.8'
python-version: '3.13'

- name: Install dependencies
run: |
2 changes: 1 addition & 1 deletion .gitignore
@@ -13,7 +13,7 @@ result.txt
testing/main.c
*/*compile_commands.json
testing/benchmark_results.txt
testing/test-examples/*
testing/examples/*

# Ignore Python wheel packages (clang-format, clang-tidy)
clang-tidy-1*
2 changes: 2 additions & 0 deletions README.md
@@ -195,6 +195,8 @@ repos:
| Supports passing code string | ✅ via `--style` | ❌ |
| Verbose output | ✅ via `--verbose` | ❌ |

> [!TIP]
> In most cases, there is no significant performance difference between `cpp-linter-hooks` and `mirrors-clang-format`. See the [benchmark results](testing/benchmark.md) for details.

## Contributing

15 changes: 0 additions & 15 deletions docs/benchmark.md

This file was deleted.

6 changes: 6 additions & 0 deletions testing/README.md
@@ -6,3 +6,9 @@
pre-commit try-repo ./.. clang-format --verbose --all-files
pre-commit try-repo ./.. clang-tidy --verbose --all-files
```

## Benchmark

```bash
python3 testing/benchmark_hooks.py
```
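
The script clones a small set of example C programs into `testing/examples`, then times each hook over several repeated `pre-commit run --all-files` invocations. A rough sketch of a session (the timings below are illustrative, not measured values) looks like:

```bash
$ python3 testing/benchmark_hooks.py

Benchmarking mirrors-clang-format...
  Run 1: 0.117 seconds
  Run 2: 0.115 seconds
  ...

Benchmarking cpp-linter-hooks...
  Run 1: 0.114 seconds
  ...

Benchmark Results:
...
Results saved to testing/benchmark_results.txt
```
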
25 changes: 25 additions & 0 deletions testing/benchmark.md
@@ -0,0 +1,25 @@
# Benchmarking

This document outlines the benchmarking process used to compare the performance of `cpp-linter-hooks` and `mirrors-clang-format`.

> Performance data for the test suite is tracked on CodSpeed: [![CodSpeed Badge](https://img.shields.io/endpoint?url=https://codspeed.io/badge.json)](https://codspeed.io/cpp-linter/cpp-linter-hooks)

## Running the Benchmark

```bash
python3 testing/benchmark_hooks.py
```
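
The script drives two separate pre-commit configurations, one per hook (referred to in the script as `testing/benchmark_hook_1.yaml` and `testing/benchmark_hook_2.yaml`). Their exact contents are not shown in this PR; a minimal sketch, with illustrative `rev` pins rather than the versions actually benchmarked, might look like:

```yaml
# benchmark_hook_1.yaml: cpp-linter-hooks (rev pin is illustrative)
repos:
  - repo: https://github.com/cpp-linter/cpp-linter-hooks
    rev: v1.0.0  # pin to the release being benchmarked
    hooks:
      - id: clang-format
        args: [--style=file]
```

```yaml
# benchmark_hook_2.yaml: mirrors-clang-format (rev pin is illustrative)
repos:
  - repo: https://github.com/pre-commit/mirrors-clang-format
    rev: v20.1.0  # pin to the matching clang-format release
    hooks:
      - id: clang-format
```

Each timed run then amounts to `pre-commit run --config <config> --all-files` against the cloned example sources, which is what `run_hook()` measures.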

## Results

```bash
# Updated on 2025-09-02
Benchmark Results:

Hook | Avg (s) | Std (s) | Min (s) | Max (s) | Runs
---------------------+------------------+------------------+------------------+------------------+-----------------
mirrors-clang-format | 0.116 | 0.003 | 0.113 | 0.118 | 5
cpp-linter-hooks | 0.114 | 0.003 | 0.109 | 0.117 | 5

Results saved to testing/benchmark_results.txt
```
66 changes: 28 additions & 38 deletions testing/benchmark_hooks.py
@@ -8,54 +8,51 @@
Requirements:
- pre-commit must be installed and available in PATH
- Two config files:
- testing/pre-commit-config-cpp-linter-hooks.yaml
- testing/pre-commit-config-mirrors-clang-format.yaml
- Target files: testing/main.c (or adjust as needed)
- testing/cpp-linter-hooks.yaml
- testing/mirrors-clang-format.yaml
- Target files: testing/examples/*.c (or adjust as needed)
"""

import os
import subprocess
import time
import statistics
import glob

HOOKS = [
{
"name": "cpp-linter-hooks",
"config": "testing/benchmark_hook_1.yaml",
},
{
"name": "mirrors-clang-format",
"config": "testing/benchmark_hook_2.yaml",
},
{
"name": "cpp-linter-hooks",
"config": "testing/benchmark_hook_1.yaml",
},
]

# Automatically find all C/C++ files in testing/ (and optionally src/, include/)
TARGET_FILES = glob.glob("testing/test-examples/*.c", recursive=True)

REPEATS = 5
RESULTS_FILE = "testing/benchmark_results.txt"


def git_clone():
def prepare_code():
try:
subprocess.run(["rm", "-rf", "testing/examples"], check=True)
subprocess.run(
[
"git",
"clone",
"--depth",
"1",
"https://github.com/gouravthakur39/beginners-C-program-examples.git",
"testing/test-examples",
"testing/examples",
],
check=True,
)
except subprocess.CalledProcessError:
pass


def run_hook(config, files):
cmd = ["pre-commit", "run", "--config", config, "--files"] + files
def run_hook(config):
cmd = ["pre-commit", "run", "--config", config, "--all-files"]
start = time.perf_counter()
try:
subprocess.run(cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
@@ -66,30 +63,16 @@ def run_hook(config, files):
return end - start


def safe_git_restore(files):
# Only restore files tracked by git
tracked = []
for f in files:
result = subprocess.run(
["git", "ls-files", "--error-unmatch", f],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
if result.returncode == 0:
tracked.append(f)
if tracked:
subprocess.run(["git", "restore"] + tracked)


def benchmark():
results = {}
os.chdir("testing/examples")
for hook in HOOKS:
times = []
print(f"\nBenchmarking {hook['name']}...")
for i in range(REPEATS):
safe_git_restore(TARGET_FILES)
prepare_code()
subprocess.run(["pre-commit", "clean"])
t = run_hook(hook["config"], TARGET_FILES)
t = run_hook(hook["config"])
print(f" Run {i + 1}: {t:.3f} seconds")
times.append(t)
results[hook["name"]] = times
@@ -132,20 +115,27 @@ def report(results):
f.write(line + "\n")
print(f"\nResults saved to {RESULTS_FILE}")

# Write to GitHub Actions summary if available
# Write to GitHub Actions summary
summary_path = os.environ.get("GITHUB_STEP_SUMMARY")
if summary_path:
with open(summary_path, "a") as f:
f.write("## Benchmark Results\n\n")
f.write(header_row + "\n")
f.write("-+-".join("-" * w for w in col_widths) + "\n")
for line in lines:
f.write(line + "\n")
# Markdown table header
md_header = "| " + " | ".join(headers) + " |\n"
md_sep = "|" + "|".join(["-" * (w + 2) for w in col_widths]) + "|\n"
f.write(md_header)
f.write(md_sep)
for name, times in results.items():
avg = statistics.mean(times)
std = statistics.stdev(times) if len(times) > 1 else 0.0
min_t = min(times)
max_t = max(times)
md_row = f"| {name} | {avg:.3f} | {std:.3f} | {min_t:.3f} | {max_t:.3f} | {len(times)} |\n"
f.write(md_row)
f.write("\n")


def main():
git_clone()
results = benchmark()
report(results)

8 changes: 8 additions & 0 deletions testing/benchmark_results.txt
@@ -0,0 +1,8 @@
Benchmark Results:

Hook | Avg (s) | Std (s) | Min (s) | Max (s) | Runs
---------------------+------------------+------------------+------------------+------------------+-----------------
mirrors-clang-format | 0.116 | 0.003 | 0.113 | 0.118 | 5
cpp-linter-hooks | 0.114 | 0.003 | 0.109 | 0.117 | 5

Results saved to testing/benchmark_results.txt