Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
115 changes: 8 additions & 107 deletions .github/workflows/ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -7,59 +7,16 @@ on:
workflow_dispatch:

jobs:
lint:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: pre-commit/action@v3.0.1
with:
extra_args: --all-files

tests:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v3
with:
submodules: "recursive"

- name: Cache build
uses: actions/cache@v3
with:
path: core/build-tests
key: ${{ runner.os }}-build-tests-${{ hashFiles('**/CMakeLists.txt') }}

- name: Create build directory
run: mkdir -p core/build-tests

- name: Build tests
run: |
cd core/build-tests
cmake .. -DENABLE_TESTS=ON
make -j

- name: Run tests
run: |
cd core/build-tests
GTEST_OUTPUT=json:test-results/ ctest

- name: Upload test results
uses: actions/upload-artifact@v4
if: failure()
with:
name: test_results
path: ${{runner.workspace}}/core/build-tests/test/test-results/**/*.json

cmake-integration-tests:
strategy:
matrix:
include:
- codspeed-mode: "instrumentation"
runner: "ubuntu-latest"
- codspeed-mode: "walltime"
runner: "codspeed-macro"
- codspeed-mode: "off"
runner: "ubuntu-latest"
# - codspeed-mode: "walltime"
# runner: "codspeed-macro"
# - codspeed-mode: "off"
# runner: "ubuntu-latest"
runs-on: ${{ matrix.runner }}
steps:
- name: Checkout code
Expand Down Expand Up @@ -96,10 +53,10 @@ jobs:
include:
- codspeed-mode: "instrumentation"
runner: "ubuntu-latest"
- codspeed-mode: "walltime"
runner: "codspeed-macro"
- codspeed-mode: "off"
runner: "ubuntu-latest"
# - codspeed-mode: "walltime"
# runner: "codspeed-macro"
# - codspeed-mode: "off"
# runner: "ubuntu-latest"
runs-on: ${{ matrix.runner }}
steps:
- uses: actions/checkout@v4
Expand Down Expand Up @@ -127,59 +84,3 @@ jobs:
mode: ${{ matrix.codspeed-mode }}
run: bazel run //examples/google_benchmark_bazel:my_benchmark --//core:codspeed_mode=${{ matrix.codspeed-mode }}
token: ${{ secrets.CODSPEED_TOKEN }}

windows-cmake-build:
strategy:
matrix:
codspeed-mode: ["off", "walltime"]
runs-on: windows-latest
steps:
- name: Checkout code
uses: actions/checkout@v3
with:
submodules: "recursive"

- name: Cache build
uses: actions/cache@v3
with:
path: examples/google_benchmark_cmake/build
key: ${{ runner.os }}-build-${{ matrix.codspeed-mode }}-${{ hashFiles('**/CMakeLists.txt') }}

- name: Create build directory
run: |
if (-not (Test-Path examples\google_benchmark_cmake\build)) {
mkdir examples\google_benchmark_cmake\build
}
shell: pwsh

- name: Build benchmark example
run: |
cd examples\google_benchmark_cmake\build
cmake -DCODSPEED_MODE=${{ matrix.codspeed-mode }} ..
cmake --build . --config Release
shell: pwsh

windows-bazel-build:
strategy:
matrix:
codspeed-mode: ["off", "walltime"]
runs-on: windows-latest
steps:
- uses: actions/checkout@v4
with:
submodules: "recursive"

- name: Set up Bazel
uses: bazel-contrib/setup-bazel@0.14.0
with:
# Avoid downloading Bazel every time.
bazelisk-cache: true
# Store build cache per workflow.
disk-cache: ${{ github.workflow }}
# Share repository cache between workflows.
repository-cache: true

- name: Build benchmark example
run: |
bazel build //examples/google_benchmark_bazel:my_benchmark --//core:codspeed_mode=${{ matrix.codspeed-mode }}
shell: pwsh
113 changes: 113 additions & 0 deletions compare_workflow_durations.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,113 @@
#!/usr/bin/env python3
"""
Compare workflow durations between two branches.
"""

import json
import subprocess
import statistics
from datetime import datetime
from typing import List, Dict


def get_workflow_runs(branch: str, limit: int = 20) -> List[Dict]:
    """Fetch completed CI workflow runs for a branch via the GitHub CLI.

    Invokes ``gh run list`` for the ``ci.yml`` workflow, keeps only runs
    whose status is ``completed`` and that carry both timestamps, and
    attaches a computed ``durationMs`` (updatedAt - startedAt, in ms)
    to each returned run dict.

    Args:
        branch: Branch name to filter runs by.
        limit: Maximum number of runs to request from ``gh``.

    Returns:
        List of run dicts (as emitted by ``gh --json``) augmented with
        a ``durationMs`` float field.
    """
    output = subprocess.run(
        [
            "gh", "run", "list",
            "--workflow", "ci.yml",
            "--branch", branch,
            "--limit", str(limit),
            "--json", "startedAt,updatedAt,status,conclusion,number",
        ],
        capture_output=True,
        text=True,
        check=True,
    ).stdout

    completed: List[Dict] = []
    for run in json.loads(output):
        # Skip in-progress runs and any record missing a timestamp.
        if run['status'] != 'completed' or not (run['startedAt'] and run['updatedAt']):
            continue
        # 'Z' suffix is not accepted by fromisoformat before 3.11; normalize it.
        begin = datetime.fromisoformat(run['startedAt'].replace('Z', '+00:00'))
        end = datetime.fromisoformat(run['updatedAt'].replace('Z', '+00:00'))
        run['durationMs'] = (end - begin).total_seconds() * 1000
        completed.append(run)

    return completed


def calculate_stats(durations: List[float]) -> Dict[str, float]:
    """Summarize duration samples (milliseconds) as average/median stats.

    Args:
        durations: Raw durations in milliseconds.

    Returns:
        Dict with ``count`` plus average and median expressed in both
        seconds and minutes. All-zero stats when ``durations`` is empty.
    """
    if not durations:
        return {
            "count": 0,
            "avg_seconds": 0,
            "median_seconds": 0,
            "avg_minutes": 0,
            "median_minutes": 0,
        }

    mean_ms = statistics.mean(durations)
    mid_ms = statistics.median(durations)

    return {
        "count": len(durations),
        "avg_seconds": mean_ms / 1000,
        "median_seconds": mid_ms / 1000,
        "avg_minutes": mean_ms / 60000,
        "median_minutes": mid_ms / 60000,
    }


def main():
    """Compare CI workflow durations between the no-cache and cache branches.

    Fetches recent completed runs for each branch, prints per-branch
    duration statistics, and — when both branches have data — prints the
    average/median difference and whether caching made the workflow
    faster or slower.
    """
    # Single source of truth for branch names: the comparison section below
    # reuses these variables instead of repeating the literals (previously the
    # literals were duplicated, so editing the list would silently break the
    # results[...] lookups).
    no_cache_branch = "test-action-no-cache"
    cache_branch = "test-action-cache-effects"
    branches = [no_cache_branch, cache_branch]

    print("Fetching workflow runs...\n")

    results = {}
    for branch in branches:
        print(f"Fetching runs for branch: {branch}")
        runs = get_workflow_runs(branch, limit=20)
        durations = [run['durationMs'] for run in runs]
        results[branch] = calculate_stats(durations)

        print(f" Found {len(runs)} completed runs")

    # Display per-branch results.
    print("\n" + "=" * 80)
    print("WORKFLOW DURATION COMPARISON")
    print("=" * 80 + "\n")

    for branch in branches:
        stats = results[branch]
        print(f"Branch: {branch}")
        print(f" Completed runs: {stats['count']}")
        print(f" Average: {stats['avg_minutes']:.2f} minutes ({stats['avg_seconds']:.1f} seconds)")
        print(f" Median: {stats['median_minutes']:.2f} minutes ({stats['median_seconds']:.1f} seconds)")
        print()

    # Only compare when both branches produced at least one completed run.
    if all(results[b]["count"] > 0 for b in branches):
        print("=" * 80)
        print("COMPARISON")
        print("=" * 80 + "\n")

        cache_stats = results[cache_branch]
        no_cache_stats = results[no_cache_branch]

        # Positive diff => no-cache took longer => caching helps.
        avg_diff = no_cache_stats["avg_seconds"] - cache_stats["avg_seconds"]
        median_diff = no_cache_stats["median_seconds"] - cache_stats["median_seconds"]

        # Guard against division by zero when the baseline average is 0.
        avg_pct = (avg_diff / no_cache_stats["avg_seconds"]) * 100 if no_cache_stats["avg_seconds"] > 0 else 0
        median_pct = (median_diff / no_cache_stats["median_seconds"]) * 100 if no_cache_stats["median_seconds"] > 0 else 0

        print(f"Average difference: {abs(avg_diff):.1f} seconds ({abs(avg_pct):.1f}%)")
        if avg_diff > 0:
            print(" → Cache makes workflow FASTER")
        else:
            print(" → Cache makes workflow SLOWER")

        print(f"\nMedian difference: {abs(median_diff):.1f} seconds ({abs(median_pct):.1f}%)")
        if median_diff > 0:
            print(" → Cache makes workflow FASTER")
        else:
            print(" → Cache makes workflow SLOWER")


if __name__ == "__main__":
    main()