From 10b067c5b7aadf5d305f1f48b937a337ff295617 Mon Sep 17 00:00:00 2001
From: Thomas Stromberg
Date: Wed, 29 Oct 2025 17:04:19 -0400
Subject: [PATCH 1/2] Move time graph to the top

---
 internal/server/static/index.html | 36 ++++++++++++++++---------------
 1 file changed, 19 insertions(+), 17 deletions(-)

diff --git a/internal/server/static/index.html b/internal/server/static/index.html
index 63fbdae..3c7b1b9 100644
--- a/internal/server/static/index.html
+++ b/internal/server/static/index.html
@@ -1667,8 +1667,9 @@ Why calculate PR costs?
 }
 
        // Build the HTML for the timeline
-       let html = '';
-       html += 'Workflow State Timeline';
+       let html = '';
+       html += 'Workflow State Timeline';
+       html += 'How long, on average, is a PR stuck in each state?';
 
        // Stacked bar
        html += '';
@@ -2090,16 +2091,17 @@ Why calculate PR costs?
 
                // Generate HTML with efficiency section at top
                let html = formatEfficiencyHTML(efficiencyPct, grade, message, preventableCost, preventableHours, b.total_cost, totalHours, b.pr_duration, false, 0, 0);
-               html += '';
-               html += 'Cost Breakdown';
-               html += '' + formatBreakdown(data) + '';
-               html += '';
 
                // Add workflow timeline if available (only when using turnserver)
                if (data.seconds_in_state) {
                    html += formatWorkflowTimeline(data.seconds_in_state);
                }
 
+               html += '';
+               html += 'Cost Breakdown';
+               html += '' + formatBreakdown(data) + '';
+               html += '';
+
                resultDiv.innerHTML = html;
            }
@@ -2230,6 +2232,17 @@ Why calculate PR costs?
                const avgEfficiencyPct = avgTotalHours > 0 ? 100.0 * (avgTotalHours - avgPreventableHours) / avgTotalHours : 100.0;
                const avgEfficiency = efficiencyGrade(avgEfficiencyPct);
 
+               // Add workflow timeline if available (only when using turnserver)
+               if (data.seconds_in_state) {
+                   // Calculate average seconds per PR
+                   const avgSecondsInState = {};
+                   const sampleCount = e.successful_samples || 1;
+                   for (const [state, totalSeconds] of Object.entries(data.seconds_in_state)) {
+                       avgSecondsInState[state] = Math.round(totalSeconds / sampleCount);
+                   }
+                   html += formatWorkflowTimeline(avgSecondsInState);
+               }
+
                // Extrapolated total section
                html += '';
                html += `${days}-day Estimated Costs`;
@@ -2242,17 +2255,6 @@ Why calculate PR costs?
                html += '' + formatAveragePR(e) + '';
                html += '';
 
-               // Add workflow timeline if available (only when using turnserver)
-               if (data.seconds_in_state) {
-                   // Calculate average seconds per PR
-                   const avgSecondsInState = {};
-                   const sampleCount = e.successful_samples || 1;
-                   for (const [state, totalSeconds] of Object.entries(data.seconds_in_state)) {
-                       avgSecondsInState[state] = Math.round(totalSeconds / sampleCount);
-                   }
-                   html += formatWorkflowTimeline(avgSecondsInState);
-               }
-
                resultDiv.innerHTML = html;
                resolve();
                return;

From ea3e005271407d12503972444a896167e60b6dab Mon Sep 17 00:00:00 2001
From: Thomas Stromberg
Date: Wed, 29 Oct 2025 17:21:44 -0400
Subject: [PATCH 2/2] Add merge time modeling for everyone

---
 cmd/prcost/main.go                | 92 ++++++++++++++++++++++++++++--
 cmd/prcost/repository.go          | 95 ++++++++++++++++++++++++++++---
 internal/server/static/index.html | 64 +++++++++++++++++++--
 3 files changed, 236 insertions(+), 15 deletions(-)

diff --git a/cmd/prcost/main.go b/cmd/prcost/main.go
index 656e35d..6c6bcef 100644
--- a/cmd/prcost/main.go
+++ b/cmd/prcost/main.go
@@ -34,6 +34,9 @@ func main() {
     samples := flag.Int("samples", 30, "Number of PRs to sample for extrapolation (30=fast/±18%, 50=slower/±14%)")
     days := flag.Int("days", 60, "Number of days to look back for PR modifications")
 
+    // Modeling flags
+    modelMergeTime := flag.Duration("model-merge-time", 1*time.Hour, "Model savings if average merge time was reduced to this duration")
+
     flag.Usage = func() {
         fmt.Fprintf(os.Stderr, "Usage: %s [options] \n", os.Args[0])
         fmt.Fprintf(os.Stderr, "   %s --org [--repo ] [options]\n\n", os.Args[0])
@@ -119,7 +122,7 @@ func main() {
 
     if *repo != "" {
         // Single repository mode
-        err := analyzeRepository(ctx, *org, *repo, *samples, *days, cfg, token, *dataSource)
+        err := analyzeRepository(ctx, *org, *repo, *samples, *days, cfg, token, *dataSource, modelMergeTime)
         if err != nil {
             log.Fatalf("Repository analysis failed: %v", err)
         }
@@ -130,7 +133,7 @@
         "samples", *samples,
         "days", *days)
 
-    err := analyzeOrganization(ctx, *org, *samples, *days, cfg, token, *dataSource)
+    err := analyzeOrganization(ctx, *org, *samples, *days, cfg, token, *dataSource, modelMergeTime)
     if err != nil {
         log.Fatalf("Organization analysis failed: %v", err)
     }
@@ -174,7 +177,7 @@ func main() {
     // Output in requested format
     switch *format {
     case "human":
-        printHumanReadable(&breakdown, prURL)
+        printHumanReadable(&breakdown, prURL, *modelMergeTime, cfg)
     case "json":
         encoder := json.NewEncoder(os.Stdout)
         encoder.SetIndent("", " ")
@@ -206,7 +209,7 @@ func authToken(ctx context.Context) (string, error) {
 }
 
 // printHumanReadable outputs a detailed itemized bill in human-readable format.
-func printHumanReadable(breakdown *cost.Breakdown, prURL string) {
+func printHumanReadable(breakdown *cost.Breakdown, prURL string, modelMergeTime time.Duration, cfg cost.Config) {
     // Helper to format currency with commas
     formatCurrency := func(amount float64) string {
         return fmt.Sprintf("$%s", formatWithCommas(amount))
     }
@@ -309,6 +312,11 @@
 
     // Print efficiency score
     printEfficiency(breakdown)
+
+    // Print modeling callout if PR duration exceeds model merge time
+    if breakdown.PRDuration > modelMergeTime.Hours() {
+        printMergeTimeModelingCallout(breakdown, modelMergeTime, cfg)
+    }
 }
 
 // printDelayCosts prints delay and future costs section.
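Reviewer note on the new flag: because it is registered with `flag.Duration`, `-model-merge-time` accepts standard Go duration strings such as `40m`, `1h`, or `90m`. A minimal, hypothetical sketch of that parsing behavior (the flag name and default mirror this patch; everything else is illustrative and not code from this repository):

```go
package main

import (
	"flag"
	"fmt"
	"time"
)

func main() {
	// Mirrors the flag added in this patch; the default stays at one hour.
	modelMergeTime := flag.Duration("model-merge-time", 1*time.Hour,
		"Model savings if average merge time was reduced to this duration")
	flag.Parse()

	// e.g. running with -model-merge-time=40m prints "modeling a merge time of 40m0s".
	fmt.Println("modeling a merge time of", modelMergeTime.String())
}
```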
@@ -519,6 +527,82 @@ func mergeVelocityGrade(avgOpenDays float64) (grade, message string) {
     }
 }
 
+// printMergeTimeModelingCallout prints a callout showing potential savings from reduced merge time.
+func printMergeTimeModelingCallout(breakdown *cost.Breakdown, targetMergeTime time.Duration, cfg cost.Config) {
+    targetHours := targetMergeTime.Hours()
+    currentHours := breakdown.PRDuration
+
+    // Calculate hourly rate
+    hourlyRate := (cfg.AnnualSalary * cfg.BenefitsMultiplier) / cfg.HoursPerYear
+
+    // Recalculate delivery delay with target merge time
+    remodelDeliveryDelayCost := hourlyRate * cfg.DeliveryDelayFactor * targetHours
+
+    // Code churn: 40min-1h is too short for meaningful code churn (< 1 day)
+    remodelCodeChurnCost := 0.0
+
+    // Automated updates: only applies to PRs open > 1 day
+    remodelAutomatedUpdatesCost := 0.0
+
+    // PR tracking: scales with open time (already minimal for short PRs)
+    remodelPRTrackingCost := 0.0
+    if targetHours >= 1.0 { // Only track PRs open >= 1 hour
+        daysOpen := targetHours / 24.0
+        remodelPRTrackingHours := (cfg.PRTrackingMinutesPerDay / 60.0) * daysOpen
+        remodelPRTrackingCost = remodelPRTrackingHours * hourlyRate
+    }
+
+    // Calculate savings for this PR
+    currentPreventable := breakdown.DelayCostDetail.DeliveryDelayCost +
+        breakdown.DelayCostDetail.CodeChurnCost +
+        breakdown.DelayCostDetail.AutomatedUpdatesCost +
+        breakdown.DelayCostDetail.PRTrackingCost
+
+    remodelPreventable := remodelDeliveryDelayCost + remodelCodeChurnCost +
+        remodelAutomatedUpdatesCost + remodelPRTrackingCost
+
+    savingsPerPR := currentPreventable - remodelPreventable
+
+    // Calculate efficiency improvement
+    // Current efficiency: (total hours - preventable hours) / total hours
+    // Modeled efficiency: (total hours - remodeled preventable hours) / total hours
+    totalHours := breakdown.Author.TotalHours + breakdown.DelayCostDetail.TotalDelayHours
+    for _, p := range breakdown.Participants {
+        totalHours += p.TotalHours
+    }
+
+    var currentEfficiency, modeledEfficiency, efficiencyDelta float64
+    if totalHours > 0 {
+        currentEfficiency = 100.0 * (totalHours - (currentPreventable / hourlyRate)) / totalHours
+        modeledEfficiency = 100.0 * (totalHours - (remodelPreventable / hourlyRate)) / totalHours
+        efficiencyDelta = modeledEfficiency - currentEfficiency
+    }
+
+    // Estimate annual savings assuming similar PR frequency
+    // Use a conservative estimate: this PR represents typical overhead
+    // Extrapolate to 52 weeks based on how long this PR was open
+    if savingsPerPR > 0 && currentHours > 0 {
+        // Rough annual extrapolation: (savings per PR) × (52 weeks) / (weeks this PR was open)
+        weeksOpen := currentHours / (24.0 * 7.0)
+        if weeksOpen < 0.01 {
+            weeksOpen = 0.01 // Avoid division by zero, minimum 1% of a week
+        }
+        annualSavings := savingsPerPR * (52.0 / weeksOpen)
+
+        fmt.Println(" ┌─────────────────────────────────────────────────────────────┐")
+        fmt.Printf(" │ %-60s│\n", "MERGE TIME MODELING")
+        fmt.Println(" └─────────────────────────────────────────────────────────────┘")
+        fmt.Printf(" If you lowered your average merge time to %s, you would save\n", formatTimeUnit(targetHours))
+        fmt.Printf(" ~$%s/yr in engineering overhead", formatWithCommas(annualSavings))
+        if efficiencyDelta > 0 {
+            fmt.Printf(" (+%.1f%% throughput).\n", efficiencyDelta)
+        } else {
+            fmt.Println(".")
+        }
+        fmt.Println()
+    }
+}
+
 // printEfficiency prints the workflow efficiency section for a single PR.
 func printEfficiency(breakdown *cost.Breakdown) {
     // Calculate preventable waste: Code Churn + All Delay Costs + Automated Updates + PR Tracking

diff --git a/cmd/prcost/repository.go b/cmd/prcost/repository.go
index 4c21cbf..6e8a651 100644
--- a/cmd/prcost/repository.go
+++ b/cmd/prcost/repository.go
@@ -14,7 +14,9 @@ import (
 // analyzeRepository performs repository-wide cost analysis by sampling PRs.
 // Uses library functions from pkg/github and pkg/cost for fetching, sampling,
 // and extrapolation - all functionality is available to external clients.
-func analyzeRepository(ctx context.Context, owner, repo string, sampleSize, days int, cfg cost.Config, token string, dataSource string) error {
+//
+//nolint:revive // argument-limit: acceptable for entry point function
+func analyzeRepository(ctx context.Context, owner, repo string, sampleSize, days int, cfg cost.Config, token, dataSource string, modelMergeTime *time.Duration) error {
     // Calculate since date
     since := time.Now().AddDate(0, 0, -days)
@@ -103,7 +105,7 @@ func analyzeRepository(ctx context.Context, owner, repo string, sampleSize, days
     extrapolated := cost.ExtrapolateFromSamples(breakdowns, len(prs), totalAuthors, openPRCount, actualDays, cfg)
 
     // Display results in itemized format
-    printExtrapolatedResults(fmt.Sprintf("%s/%s", owner, repo), actualDays, &extrapolated, cfg)
+    printExtrapolatedResults(fmt.Sprintf("%s/%s", owner, repo), actualDays, &extrapolated, cfg, *modelMergeTime)
 
     return nil
 }
@@ -111,7 +113,9 @@
 // analyzeOrganization performs organization-wide cost analysis by sampling PRs across all repos.
 // Uses library functions from pkg/github and pkg/cost for fetching, sampling,
 // and extrapolation - all functionality is available to external clients.
-func analyzeOrganization(ctx context.Context, org string, sampleSize, days int, cfg cost.Config, token string, dataSource string) error {
+//
+//nolint:revive // argument-limit: acceptable for entry point function
+func analyzeOrganization(ctx context.Context, org string, sampleSize, days int, cfg cost.Config, token, dataSource string, modelMergeTime *time.Duration) error {
     slog.Info("Fetching PR list from organization")
 
     // Calculate since date
@@ -203,7 +207,7 @@ func analyzeOrganization(ctx context.Context, org string, sampleSize, days int,
     extrapolated := cost.ExtrapolateFromSamples(breakdowns, len(prs), totalAuthors, totalOpenPRs, actualDays, cfg)
 
     // Display results in itemized format
-    printExtrapolatedResults(fmt.Sprintf("%s (organization)", org), actualDays, &extrapolated, cfg)
+    printExtrapolatedResults(fmt.Sprintf("%s (organization)", org), actualDays, &extrapolated, cfg, *modelMergeTime)
 
     return nil
 }
@@ -274,7 +278,7 @@ func formatTimeUnit(hours float64) string {
 
 // printExtrapolatedResults displays extrapolated cost breakdown in itemized format.
 //
 //nolint:maintidx,revive // acceptable complexity/length for comprehensive display function
-func printExtrapolatedResults(title string, days int, ext *cost.ExtrapolatedBreakdown, cfg cost.Config) {
+func printExtrapolatedResults(title string, days int, ext *cost.ExtrapolatedBreakdown, cfg cost.Config, modelMergeTime time.Duration) {
     fmt.Println()
     fmt.Printf(" %s\n", title)
     avgOpenTime := formatTimeUnit(ext.AvgPRDurationHours)
@@ -594,11 +598,11 @@ func printExtrapolatedResults(title string, days int, ext *cost.ExtrapolatedBrea
     fmt.Println()
 
     // Print extrapolated efficiency score + annual waste
-    printExtrapolatedEfficiency(ext, days, cfg)
+    printExtrapolatedEfficiency(ext, days, cfg, modelMergeTime)
 }
 
 // printExtrapolatedEfficiency prints the workflow efficiency + annual waste section for extrapolated totals.
-func printExtrapolatedEfficiency(ext *cost.ExtrapolatedBreakdown, days int, cfg cost.Config) {
+func printExtrapolatedEfficiency(ext *cost.ExtrapolatedBreakdown, days int, cfg cost.Config, modelMergeTime time.Duration) {
     // Calculate preventable waste: Code Churn + All Delay Costs + Automated Updates + PR Tracking
     preventableHours := ext.CodeChurnHours + ext.DeliveryDelayHours + ext.AutomatedUpdatesHours + ext.PRTrackingHours
     preventableCost := ext.CodeChurnCost + ext.DeliveryDelayCost + ext.AutomatedUpdatesCost + ext.PRTrackingCost
@@ -654,4 +658,81 @@ func printExtrapolatedEfficiency(ext *cost.ExtrapolatedBreakdown, days int, cfg
     fmt.Printf(" If Sustained for 1 Year: $%14s %.1f headcount\n", formatWithCommas(annualWasteCost), headcount)
     fmt.Println()
+
+    // Print merge time modeling callout if average PR duration exceeds model merge time
+    if ext.AvgPRDurationHours > modelMergeTime.Hours() {
+        printExtrapolatedMergeTimeModelingCallout(ext, days, modelMergeTime, cfg)
+    }
+}
+
+// printExtrapolatedMergeTimeModelingCallout prints a callout showing potential savings from reduced merge time.
+func printExtrapolatedMergeTimeModelingCallout(ext *cost.ExtrapolatedBreakdown, days int, targetMergeTime time.Duration, cfg cost.Config) {
+    targetHours := targetMergeTime.Hours()
+
+    // Calculate hourly rate
+    hourlyRate := (cfg.AnnualSalary * cfg.BenefitsMultiplier) / cfg.HoursPerYear
+
+    // Recalculate average preventable costs with target merge time
+    // This mirrors the logic from ExtrapolateFromSamples but with target merge time
+
+    // Average delivery delay per PR at target merge time
+    remodelDeliveryDelayPerPR := hourlyRate * cfg.DeliveryDelayFactor * targetHours
+
+    // Code churn: minimal for short PRs (< 1 day = ~0%)
+    remodelCodeChurnPerPR := 0.0
+
+    // Automated updates: only for PRs open > 1 day
+    remodelAutomatedUpdatesPerPR := 0.0
+
+    // PR tracking: scales with open time
+    remodelPRTrackingPerPR := 0.0
+    if targetHours >= 1.0 { // Only track PRs open >= 1 hour
+        daysOpen := targetHours / 24.0
+        remodelPRTrackingHours := (cfg.PRTrackingMinutesPerDay / 60.0) * daysOpen
+        remodelPRTrackingPerPR = remodelPRTrackingHours * hourlyRate
+    }
+
+    // Calculate total remodeled preventable cost for the period
+    totalPRs := float64(ext.TotalPRs)
+    remodelPreventablePerPeriod := (remodelDeliveryDelayPerPR + remodelCodeChurnPerPR +
+        remodelAutomatedUpdatesPerPR + remodelPRTrackingPerPR) * totalPRs
+
+    // Current preventable cost for the period
+    currentPreventablePerPeriod := ext.CodeChurnCost + ext.DeliveryDelayCost +
+        ext.AutomatedUpdatesCost + ext.PRTrackingCost
+
+    // Calculate savings for the period
+    savingsPerPeriod := currentPreventablePerPeriod - remodelPreventablePerPeriod
+
+    // Calculate efficiency improvement
+    // Current efficiency: (total hours - preventable hours) / total hours
+    // Modeled efficiency: (total hours - remodeled preventable hours) / total hours
+    currentPreventableHours := ext.CodeChurnHours + ext.DeliveryDelayHours +
+        ext.AutomatedUpdatesHours + ext.PRTrackingHours
+    remodelPreventableHours := remodelPreventablePerPeriod / hourlyRate
+
+    var currentEfficiency, modeledEfficiency, efficiencyDelta float64
+    if ext.TotalHours > 0 {
+        currentEfficiency = 100.0 * (ext.TotalHours - currentPreventableHours) / ext.TotalHours
+        modeledEfficiency = 100.0 * (ext.TotalHours - remodelPreventableHours) / ext.TotalHours
+        efficiencyDelta = modeledEfficiency - currentEfficiency
+    }
+
+    if savingsPerPeriod > 0 {
+        // Annualize the savings
+        weeksInPeriod := float64(days) / 7.0
+        annualSavings := savingsPerPeriod * (52.0 / weeksInPeriod)
+
+        fmt.Println(" ┌─────────────────────────────────────────────────────────────┐")
+        fmt.Printf(" │ %-60s│\n", "MERGE TIME MODELING")
+        fmt.Println(" └─────────────────────────────────────────────────────────────┘")
+        fmt.Printf(" If you lowered your average merge time to %s, you would save\n", formatTimeUnit(targetHours))
+        fmt.Printf(" ~$%s/yr in engineering overhead", formatWithCommas(annualSavings))
+        if efficiencyDelta > 0 {
+            fmt.Printf(" (+%.1f%% throughput).\n", efficiencyDelta)
+        } else {
+            fmt.Println(".")
+        }
+        fmt.Println()
+    }
 }
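To make the extrapolated callout's arithmetic easier to review: it annualizes the period's savings by scaling with 52 / (days / 7). A self-contained sketch with made-up numbers (the 0.20 delivery-delay factor and the 2080 hours/year divisor appear in this patch; the salary, benefits multiplier, PR count, average merge time, and window length below are hypothetical):

```go
package main

import "fmt"

func main() {
	// Hypothetical inputs for illustration only.
	hourlyRate := 200000.0 * 1.4 / 2080.0 // salary × benefits multiplier ÷ hours per year
	deliveryDelayFactor := 0.20           // matches the DefaultConfig value cited in the patch
	avgOpenHours := 50.0                  // observed average merge time
	targetHours := 1.0                    // modeled merge time (-model-merge-time)
	totalPRs := 120.0                     // PRs in the analysis window
	days := 60.0                          // analysis window (-days)

	// Delivery-delay component only; the real callout also zeroes out code churn,
	// automated updates, and PR tracking at the shorter merge time.
	currentPreventable := hourlyRate * deliveryDelayFactor * avgOpenHours * totalPRs
	remodeledPreventable := hourlyRate * deliveryDelayFactor * targetHours * totalPRs
	savingsPerPeriod := currentPreventable - remodeledPreventable

	// Same annualization step as printExtrapolatedMergeTimeModelingCallout.
	weeksInPeriod := days / 7.0
	annualSavings := savingsPerPeriod * (52.0 / weeksInPeriod)
	fmt.Printf("period savings ≈ $%.0f, annualized ≈ $%.0f\n", savingsPerPeriod, annualSavings)
}
```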

diff --git a/internal/server/static/index.html b/internal/server/static/index.html
index 3c7b1b9..fc7095f 100644
--- a/internal/server/static/index.html
+++ b/internal/server/static/index.html
@@ -1457,7 +1457,7 @@ Why calculate PR costs?
            return html;
        }
 
-       function formatR2RCallout(avgOpenHours, r2rSavings) {
+       function formatR2RCallout(avgOpenHours, r2rSavings, currentEfficiency, modeledEfficiency) {
            // Only show if average merge velocity is > 1 hour
            if (avgOpenHours <= 1) {
                return '';

@@ -1473,14 +1473,48 @@ Why calculate PR costs?
                savingsText = '$' + r2rSavings.toFixed(0);
            }
 
+           const efficiencyDelta = modeledEfficiency - currentEfficiency;
+           let throughputText = '';
+           if (efficiencyDelta > 0) {
+               throughputText = ' (+' + efficiencyDelta.toFixed(1) + '% throughput)';
+           }
+
            let html = '';
-           html += '✓ You\'re losing ' + savingsText + '/yr to code review lag. ';
+           html += '✓ You\'re losing ' + savingsText + '/yr' + throughputText + ' to code review lag. ';
            html += 'Ready-to-Review fixes it: <40min merges, free for OSS. ';
            html += 'go-faster@codeGROOVE.dev';
            html += '';
            return html;
        }
 
+       function formatGenericMergeTimeCallout(avgOpenHours, modeledSavings, currentEfficiency, modeledEfficiency) {
+           // Only show if average merge velocity is > 1 hour
+           if (avgOpenHours <= 1) {
+               return '';
+           }
+
+           // Format savings with appropriate precision
+           let savingsText;
+           if (modeledSavings >= 1000000) {
+               savingsText = '$' + (modeledSavings / 1000000).toFixed(1) + 'M';
+           } else if (modeledSavings >= 1000) {
+               savingsText = '$' + (modeledSavings / 1000).toFixed(0) + 'K';
+           } else {
+               savingsText = '$' + modeledSavings.toFixed(0);
+           }
+
+           const efficiencyDelta = modeledEfficiency - currentEfficiency;
+           let throughputText = '';
+           if (efficiencyDelta > 0) {
+               throughputText = ' (+' + efficiencyDelta.toFixed(1) + '% throughput)';
+           }
+
+           let html = '';
+           html += '💡 Merge Time Modeling: If you lowered your average merge time to 1h, you would save ~' + savingsText + '/yr in engineering overhead' + throughputText + '.';
+           html += '';
+           return html;
+       }
+
        function formatBreakdown(data) {
            const b = data.breakdown;
            let output = '\n';

@@ -2213,10 +2247,32 @@ Why calculate PR costs?
                const avgPRDurationHours = e.avg_pr_duration_hours || 0;
                html += formatEfficiencyHTML(extEfficiencyPct, extEfficiency.grade, extEfficiency.message, extPreventableCost, extPreventableHours, e.total_cost, e.total_hours, avgPRDurationHours, true, annualWasteCost, annualWasteHours, wasteHoursPerWeek, wasteCostPerWeek, wasteHoursPerAuthorPerWeek, wasteCostPerAuthorPerWeek, totalAuthors, salary, benefitsMultiplier, analysisType, sourceName);
 
-               // Add R2R callout if enabled and merge velocity > 1 hour
+               // Add R2R callout if enabled, otherwise generic merge time callout
+               // Calculate modeled efficiency (with 40min/1h merge time)
+               const targetMergeHours = 40 / 60; // 40 minutes in hours
+               const hourlyRate = (salary * benefitsMultiplier) / 2080;
+
+               // Remodel preventable costs with target merge time
+               const deliveryDelayFactor = 0.20; // From DefaultConfig
+               const remodelDeliveryDelayPerPR = hourlyRate * deliveryDelayFactor * targetMergeHours;
+               const remodelCodeChurnPerPR = 0; // < 1 day, no churn
+               const remodelAutomatedUpdatesPerPR = 0; // < 1 day, no automated updates
+               const remodelPRTrackingPerPR = 0; // < 1 hour, no tracking
+
+               const totalPRs = e.total_prs;
+               const remodelPreventableCost = (remodelDeliveryDelayPerPR + remodelCodeChurnPerPR +
+                   remodelAutomatedUpdatesPerPR + remodelPRTrackingPerPR) * totalPRs;
+               const remodelPreventableHours = remodelPreventableCost / hourlyRate;
+
+               const modeledEfficiency = e.total_hours > 0 ?
+                   100.0 * (e.total_hours - remodelPreventableHours) / e.total_hours : 100.0;
+
                if (data.r2r_callout) {
                    const r2rSavings = e.r2r_savings || 0;
-                   html += formatR2RCallout(avgPRDurationHours, r2rSavings);
+                   html += formatR2RCallout(avgPRDurationHours, r2rSavings, extEfficiencyPct, modeledEfficiency);
+               } else {
+                   const r2rSavings = e.r2r_savings || 0;
+                   html += formatGenericMergeTimeCallout(avgPRDurationHours, r2rSavings, extEfficiencyPct, modeledEfficiency);
                }
 
                // Calculate average PR efficiency
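The JS block above and the Go callouts share the same efficiency-delta formula; a compact restatement in Go for reference (the function and variable names here are illustrative, not identifiers from the patch):

```go
package main

import "fmt"

// efficiencyDelta mirrors the shared arithmetic: efficiency is
// 100 * (total hours - preventable hours) / total hours, and the
// reported delta is the modeled efficiency minus the current one.
func efficiencyDelta(totalHours, currentPreventableHours, remodeledPreventableHours float64) float64 {
	if totalHours <= 0 {
		return 0
	}
	current := 100.0 * (totalHours - currentPreventableHours) / totalHours
	modeled := 100.0 * (totalHours - remodeledPreventableHours) / totalHours
	return modeled - current
}

func main() {
	// Hypothetical numbers: 500 engineering hours in the window,
	// 120 preventable today vs. 5 at the modeled merge time.
	fmt.Printf("+%.1f%% throughput\n", efficiencyDelta(500, 120, 5))
}
```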