diff --git a/Makefile b/Makefile index 8e4de09..e954b66 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,15 @@ .PHONY: test -test: +test: test-go test-js + +.PHONY: test-go +test-go: go test -race -cover ./... +.PHONY: test-js +test-js: + @echo "Running JavaScript tests..." + @node internal/server/static/formatR2RCallout.test.js + # BEGIN: lint-install . # http://github.com/codeGROOVE-dev/lint-install diff --git a/cmd/prcost/main.go b/cmd/prcost/main.go index 7304b25..4dccafd 100644 --- a/cmd/prcost/main.go +++ b/cmd/prcost/main.go @@ -35,7 +35,7 @@ func main() { days := flag.Int("days", 60, "Number of days to look back for PR modifications") // Modeling flags - modelMergeTime := flag.Duration("model-merge-time", 1*time.Hour, "Model savings if average merge time was reduced to this duration") + targetMergeTime := flag.Duration("target-merge-time", 90*time.Minute, "Target merge time for efficiency modeling (default: 90 minutes / 1.5 hours)") flag.Usage = func() { fmt.Fprintf(os.Stderr, "Usage: %s [options] \n", os.Args[0]) @@ -100,11 +100,13 @@ func main() { cfg.AnnualSalary = *salary cfg.BenefitsMultiplier = *benefits cfg.EventDuration = time.Duration(*eventMinutes) * time.Minute + cfg.TargetMergeTimeHours = targetMergeTime.Hours() slog.Debug("Configuration", "salary", cfg.AnnualSalary, "benefits_multiplier", cfg.BenefitsMultiplier, "event_minutes", *eventMinutes, + "target_merge_time_hours", cfg.TargetMergeTimeHours, "delivery_delay_factor", cfg.DeliveryDelayFactor) // Retrieve GitHub token from gh CLI @@ -122,7 +124,7 @@ func main() { if *repo != "" { // Single repository mode - err := analyzeRepository(ctx, *org, *repo, *samples, *days, cfg, token, *dataSource, modelMergeTime) + err := analyzeRepository(ctx, *org, *repo, *samples, *days, cfg, token, *dataSource) if err != nil { log.Fatalf("Repository analysis failed: %v", err) } @@ -133,7 +135,7 @@ func main() { "samples", *samples, "days", *days) - err := analyzeOrganization(ctx, *org, *samples, *days, cfg, token, *dataSource, modelMergeTime) + err := analyzeOrganization(ctx, *org, *samples, *days, cfg, token, *dataSource) if err != nil { log.Fatalf("Organization analysis failed: %v", err) } @@ -177,7 +179,7 @@ func main() { // Output in requested format switch *format { case "human": - printHumanReadable(&breakdown, prURL, *modelMergeTime, cfg) + printHumanReadable(&breakdown, prURL, cfg) case "json": encoder := json.NewEncoder(os.Stdout) encoder.SetIndent("", " ") @@ -209,7 +211,7 @@ func authToken(ctx context.Context) (string, error) { } // printHumanReadable outputs a detailed itemized bill in human-readable format. 
-func printHumanReadable(breakdown *cost.Breakdown, prURL string, modelMergeTime time.Duration, cfg cost.Config) { +func printHumanReadable(breakdown *cost.Breakdown, prURL string, cfg cost.Config) { // Helper to format currency with commas formatCurrency := func(amount float64) string { return fmt.Sprintf("$%s", formatWithCommas(amount)) @@ -313,9 +315,9 @@ func printHumanReadable(breakdown *cost.Breakdown, prURL string, modelMergeTime // Print efficiency score printEfficiency(breakdown) - // Print modeling callout if PR duration exceeds model merge time - if breakdown.PRDuration > modelMergeTime.Hours() { - printMergeTimeModelingCallout(breakdown, modelMergeTime, cfg) + // Print modeling callout if PR duration exceeds target merge time + if breakdown.PRDuration > cfg.TargetMergeTimeHours { + printMergeTimeModelingCallout(breakdown, cfg) } } @@ -528,8 +530,8 @@ func mergeVelocityGrade(avgOpenDays float64) (grade, message string) { } // printMergeTimeModelingCallout prints a callout showing potential savings from reduced merge time. -func printMergeTimeModelingCallout(breakdown *cost.Breakdown, targetMergeTime time.Duration, cfg cost.Config) { - targetHours := targetMergeTime.Hours() +func printMergeTimeModelingCallout(breakdown *cost.Breakdown, cfg cost.Config) { + targetHours := cfg.TargetMergeTimeHours currentHours := breakdown.PRDuration // Calculate hourly rate @@ -538,7 +540,7 @@ func printMergeTimeModelingCallout(breakdown *cost.Breakdown, targetMergeTime ti // Recalculate delivery delay with target merge time remodelDeliveryDelayCost := hourlyRate * cfg.DeliveryDelayFactor * targetHours - // Code churn: 40min-1h is too short for meaningful code churn (< 1 day) + // Code churn: target time is too short for meaningful code churn (< 1 day) remodelCodeChurnCost := 0.0 // Automated updates: only applies to PRs open > 1 day @@ -546,7 +548,7 @@ func printMergeTimeModelingCallout(breakdown *cost.Breakdown, targetMergeTime ti // PR tracking: scales with open time (already minimal for short PRs) remodelPRTrackingCost := 0.0 - if targetHours >= 1.0 { // Only track PRs open >= 1 hour + if targetHours >= 1.0 { // Minimal tracking for PRs open >= 1 hour daysOpen := targetHours / 24.0 remodelPRTrackingHours := (cfg.PRTrackingMinutesPerDay / 60.0) * daysOpen remodelPRTrackingCost = remodelPRTrackingHours * hourlyRate diff --git a/cmd/prcost/repository.go b/cmd/prcost/repository.go index 6e8a651..9dff09f 100644 --- a/cmd/prcost/repository.go +++ b/cmd/prcost/repository.go @@ -16,7 +16,7 @@ import ( // and extrapolation - all functionality is available to external clients. 
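The re-modeled figures in `printMergeTimeModelingCallout` reduce to a few lines of arithmetic. A minimal, runnable sketch — the salary, benefits multiplier, delivery-delay factor, and tracking minutes below are assumed placeholder values (only the 2080 hours/year divisor and the 1.5 h default target appear in this diff); the real numbers come from `cost.DefaultConfig()`:

```go
package main

import "fmt"

func main() {
	// Assumed example inputs; the project reads these from cost.Config.
	const (
		annualSalary        = 200_000.0 // assumed
		benefitsMultiplier  = 1.4       // assumed
		hoursPerYear        = 2080.0    // matches the divisor used in index.html
		deliveryDelayFactor = 0.1       // assumed
		prTrackingMinPerDay = 5.0       // assumed
		targetHours         = 1.5       // TargetMergeTimeHours default
	)

	hourlyRate := (annualSalary * benefitsMultiplier) / hoursPerYear

	// Delivery delay scales linearly with hours open.
	remodelDeliveryDelay := hourlyRate * deliveryDelayFactor * targetHours

	// Churn and automated updates are zeroed: 1.5h is well under one day.
	remodelChurn, remodelAutoUpdates := 0.0, 0.0

	// Tracking only accrues once a PR has been open for at least an hour.
	remodelTracking := 0.0
	if targetHours >= 1.0 {
		daysOpen := targetHours / 24.0
		remodelTracking = (prTrackingMinPerDay / 60.0) * daysOpen * hourlyRate
	}

	fmt.Printf("remodeled preventable cost per PR: $%.2f\n",
		remodelDeliveryDelay+remodelChurn+remodelAutoUpdates+remodelTracking)
}
```

The extrapolated variant in repository.go applies the same per-PR arithmetic and then scales by PR count.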
// //nolint:revive // argument-limit: acceptable for entry point function -func analyzeRepository(ctx context.Context, owner, repo string, sampleSize, days int, cfg cost.Config, token, dataSource string, modelMergeTime *time.Duration) error { +func analyzeRepository(ctx context.Context, owner, repo string, sampleSize, days int, cfg cost.Config, token, dataSource string) error { // Calculate since date since := time.Now().AddDate(0, 0, -days) @@ -105,7 +105,7 @@ func analyzeRepository(ctx context.Context, owner, repo string, sampleSize, days extrapolated := cost.ExtrapolateFromSamples(breakdowns, len(prs), totalAuthors, openPRCount, actualDays, cfg) // Display results in itemized format - printExtrapolatedResults(fmt.Sprintf("%s/%s", owner, repo), actualDays, &extrapolated, cfg, *modelMergeTime) + printExtrapolatedResults(fmt.Sprintf("%s/%s", owner, repo), actualDays, &extrapolated, cfg) return nil } @@ -115,7 +115,7 @@ func analyzeRepository(ctx context.Context, owner, repo string, sampleSize, days // and extrapolation - all functionality is available to external clients. // //nolint:revive // argument-limit: acceptable for entry point function -func analyzeOrganization(ctx context.Context, org string, sampleSize, days int, cfg cost.Config, token, dataSource string, modelMergeTime *time.Duration) error { +func analyzeOrganization(ctx context.Context, org string, sampleSize, days int, cfg cost.Config, token, dataSource string) error { slog.Info("Fetching PR list from organization") // Calculate since date @@ -207,7 +207,7 @@ func analyzeOrganization(ctx context.Context, org string, sampleSize, days int, extrapolated := cost.ExtrapolateFromSamples(breakdowns, len(prs), totalAuthors, totalOpenPRs, actualDays, cfg) // Display results in itemized format - printExtrapolatedResults(fmt.Sprintf("%s (organization)", org), actualDays, &extrapolated, cfg, *modelMergeTime) + printExtrapolatedResults(fmt.Sprintf("%s (organization)", org), actualDays, &extrapolated, cfg) return nil } @@ -278,7 +278,7 @@ func formatTimeUnit(hours float64) string { // printExtrapolatedResults displays extrapolated cost breakdown in itemized format. 
// //nolint:maintidx,revive // acceptable complexity/length for comprehensive display function -func printExtrapolatedResults(title string, days int, ext *cost.ExtrapolatedBreakdown, cfg cost.Config, modelMergeTime time.Duration) { +func printExtrapolatedResults(title string, days int, ext *cost.ExtrapolatedBreakdown, cfg cost.Config) { fmt.Println() fmt.Printf(" %s\n", title) avgOpenTime := formatTimeUnit(ext.AvgPRDurationHours) @@ -396,7 +396,7 @@ func printExtrapolatedResults(title string, days int, ext *cost.ExtrapolatedBrea fmt.Println() } - // Merge Delay section + // Delay Costs section avgHumanOpenTime := formatTimeUnit(ext.AvgHumanPRDurationHours) avgBotOpenTime := formatTimeUnit(ext.AvgBotPRDurationHours) delayCostsHeader := fmt.Sprintf(" Delay Costs (human PRs avg %s open", avgHumanOpenTime) @@ -422,6 +422,17 @@ func printExtrapolatedResults(title string, days int, ext *cost.ExtrapolatedBrea fmt.Print(formatSubtotalLine(avgMergeDelayCost, formatTimeUnit(avgMergeDelayHours), fmt.Sprintf("(%.1f%%)", pct))) fmt.Println() + // Preventable Future Costs section + if avgCodeChurnCost > 0 { + fmt.Println(" Preventable Future Costs") + fmt.Println(" ────────────────────────") + fmt.Print(formatItemLine("Rework due to churn", avgCodeChurnCost, formatTimeUnit(avgCodeChurnHours), fmt.Sprintf("(%d PRs)", ext.CodeChurnPRCount))) + fmt.Print(formatSectionDivider()) + pct = (avgCodeChurnCost / avgTotalCost) * 100 + fmt.Print(formatSubtotalLine(avgCodeChurnCost, formatTimeUnit(avgCodeChurnHours), fmt.Sprintf("(%.1f%%)", pct))) + fmt.Println() + } + // Future Costs section avgFutureReviewCost := ext.FutureReviewCost / float64(ext.TotalPRs) avgFutureMergeCost := ext.FutureMergeCost / float64(ext.TotalPRs) @@ -430,15 +441,12 @@ func printExtrapolatedResults(title string, days int, ext *cost.ExtrapolatedBrea avgFutureMergeHours := ext.FutureMergeHours / float64(ext.TotalPRs) avgFutureContextHours := ext.FutureContextHours / float64(ext.TotalPRs) - hasFutureCosts := ext.CodeChurnCost > 0.01 || ext.FutureReviewCost > 0.01 || + hasFutureCosts := ext.FutureReviewCost > 0.01 || ext.FutureMergeCost > 0.01 || ext.FutureContextCost > 0.01 if hasFutureCosts { fmt.Println(" Future Costs") fmt.Println(" ────────────") - if ext.CodeChurnCost > 0.01 { - fmt.Print(formatItemLine("Code Churn", avgCodeChurnCost, formatTimeUnit(avgCodeChurnHours), fmt.Sprintf("(%d PRs)", ext.CodeChurnPRCount))) - } if ext.FutureReviewCost > 0.01 { fmt.Print(formatItemLine("Review", avgFutureReviewCost, formatTimeUnit(avgFutureReviewHours), fmt.Sprintf("(%d PRs)", ext.FutureReviewPRCount))) } @@ -449,8 +457,8 @@ func printExtrapolatedResults(title string, days int, ext *cost.ExtrapolatedBrea avgFutureContextSessions := float64(ext.FutureContextSessions) / float64(ext.TotalPRs) fmt.Print(formatItemLine("Context Switching", avgFutureContextCost, formatTimeUnit(avgFutureContextHours), fmt.Sprintf("(%.1f sessions)", avgFutureContextSessions))) } - avgFutureCost := avgCodeChurnCost + avgFutureReviewCost + avgFutureMergeCost + avgFutureContextCost - avgFutureHours := avgCodeChurnHours + avgFutureReviewHours + avgFutureMergeHours + avgFutureContextHours + avgFutureCost := avgFutureReviewCost + avgFutureMergeCost + avgFutureContextCost + avgFutureHours := avgFutureReviewHours + avgFutureMergeHours + avgFutureContextHours fmt.Print(formatSectionDivider()) pct = (avgFutureCost / avgTotalCost) * 100 fmt.Print(formatSubtotalLine(avgFutureCost, formatTimeUnit(avgFutureHours), fmt.Sprintf("(%.1f%%)", pct))) @@ -529,7 +537,7 @@ func 
printExtrapolatedResults(title string, days int, ext *cost.ExtrapolatedBrea fmt.Println() } - // Merge Delay section (extrapolated) + // Delay Costs section (extrapolated) extAvgHumanOpenTime := formatTimeUnit(ext.AvgHumanPRDurationHours) extAvgBotOpenTime := formatTimeUnit(ext.AvgBotPRDurationHours) extDelayCostsHeader := fmt.Sprintf(" Delay Costs (human PRs avg %s open", extAvgHumanOpenTime) @@ -549,25 +557,33 @@ func printExtrapolatedResults(title string, days int, ext *cost.ExtrapolatedBrea if ext.PRTrackingCost > 0 { fmt.Print(formatItemLine("PR Tracking", ext.PRTrackingCost, formatTimeUnit(ext.PRTrackingHours), fmt.Sprintf("(%d open PRs)", ext.OpenPRs))) } - extMergeDelayCost := ext.DeliveryDelayCost + ext.CodeChurnCost + ext.AutomatedUpdatesCost + ext.PRTrackingCost - extMergeDelayHours := ext.DeliveryDelayHours + ext.CodeChurnHours + ext.AutomatedUpdatesHours + ext.PRTrackingHours + extMergeDelayCost := ext.DeliveryDelayCost + ext.AutomatedUpdatesCost + ext.PRTrackingCost + extMergeDelayHours := ext.DeliveryDelayHours + ext.AutomatedUpdatesHours + ext.PRTrackingHours fmt.Print(formatSectionDivider()) pct = (extMergeDelayCost / ext.TotalCost) * 100 fmt.Print(formatSubtotalLine(extMergeDelayCost, formatTimeUnit(extMergeDelayHours), fmt.Sprintf("(%.1f%%)", pct))) fmt.Println() + // Preventable Future Costs section (extrapolated) + if ext.CodeChurnCost > 0 { + fmt.Println(" Preventable Future Costs") + fmt.Println(" ────────────────────────") + totalKLOC := float64(ext.TotalNewLines+ext.TotalModifiedLines) / 1000.0 + churnLOCStr := formatLOC(totalKLOC) + fmt.Print(formatItemLine("Rework due to churn", ext.CodeChurnCost, formatTimeUnit(ext.CodeChurnHours), fmt.Sprintf("(%d PRs, ~%s)", ext.CodeChurnPRCount, churnLOCStr))) + fmt.Print(formatSectionDivider()) + pct = (ext.CodeChurnCost / ext.TotalCost) * 100 + fmt.Print(formatSubtotalLine(ext.CodeChurnCost, formatTimeUnit(ext.CodeChurnHours), fmt.Sprintf("(%.1f%%)", pct))) + fmt.Println() + } + // Future Costs section (extrapolated) - extHasFutureCosts := ext.CodeChurnCost > 0.01 || ext.FutureReviewCost > 0.01 || + extHasFutureCosts := ext.FutureReviewCost > 0.01 || ext.FutureMergeCost > 0.01 || ext.FutureContextCost > 0.01 if extHasFutureCosts { fmt.Println(" Future Costs") fmt.Println(" ────────────") - if ext.CodeChurnCost > 0.01 { - totalKLOC := float64(ext.TotalNewLines+ext.TotalModifiedLines) / 1000.0 - churnLOCStr := formatLOC(totalKLOC) - fmt.Print(formatItemLine("Code Churn", ext.CodeChurnCost, formatTimeUnit(ext.CodeChurnHours), fmt.Sprintf("(%d PRs, ~%s)", ext.CodeChurnPRCount, churnLOCStr))) - } if ext.FutureReviewCost > 0.01 { fmt.Print(formatItemLine("Review", ext.FutureReviewCost, formatTimeUnit(ext.FutureReviewHours), fmt.Sprintf("(%d PRs)", ext.FutureReviewPRCount))) } @@ -577,8 +593,8 @@ func printExtrapolatedResults(title string, days int, ext *cost.ExtrapolatedBrea if ext.FutureContextCost > 0.01 { fmt.Print(formatItemLine("Context Switching", ext.FutureContextCost, formatTimeUnit(ext.FutureContextHours), fmt.Sprintf("(%d sessions)", ext.FutureContextSessions))) } - extFutureCost := ext.CodeChurnCost + ext.FutureReviewCost + ext.FutureMergeCost + ext.FutureContextCost - extFutureHours := ext.CodeChurnHours + ext.FutureReviewHours + ext.FutureMergeHours + ext.FutureContextHours + extFutureCost := ext.FutureReviewCost + ext.FutureMergeCost + ext.FutureContextCost + extFutureHours := ext.FutureReviewHours + ext.FutureMergeHours + ext.FutureContextHours fmt.Print(formatSectionDivider()) pct = (extFutureCost / ext.TotalCost) 
* 100 fmt.Print(formatSubtotalLine(extFutureCost, formatTimeUnit(extFutureHours), fmt.Sprintf("(%.1f%%)", pct))) @@ -598,11 +614,11 @@ func printExtrapolatedResults(title string, days int, ext *cost.ExtrapolatedBrea fmt.Println() // Print extrapolated efficiency score + annual waste - printExtrapolatedEfficiency(ext, days, cfg, modelMergeTime) + printExtrapolatedEfficiency(ext, days, cfg) } // printExtrapolatedEfficiency prints the workflow efficiency + annual waste section for extrapolated totals. -func printExtrapolatedEfficiency(ext *cost.ExtrapolatedBreakdown, days int, cfg cost.Config, modelMergeTime time.Duration) { +func printExtrapolatedEfficiency(ext *cost.ExtrapolatedBreakdown, days int, cfg cost.Config) { // Calculate preventable waste: Code Churn + All Delay Costs + Automated Updates + PR Tracking preventableHours := ext.CodeChurnHours + ext.DeliveryDelayHours + ext.AutomatedUpdatesHours + ext.PRTrackingHours preventableCost := ext.CodeChurnCost + ext.DeliveryDelayCost + ext.AutomatedUpdatesCost + ext.PRTrackingCost @@ -660,14 +676,14 @@ func printExtrapolatedEfficiency(ext *cost.ExtrapolatedBreakdown, days int, cfg fmt.Println() // Print merge time modeling callout if average PR duration exceeds model merge time - if ext.AvgPRDurationHours > modelMergeTime.Hours() { - printExtrapolatedMergeTimeModelingCallout(ext, days, modelMergeTime, cfg) + if ext.AvgPRDurationHours > cfg.TargetMergeTimeHours { + printExtrapolatedMergeTimeModelingCallout(ext, days, cfg) } } // printExtrapolatedMergeTimeModelingCallout prints a callout showing potential savings from reduced merge time. -func printExtrapolatedMergeTimeModelingCallout(ext *cost.ExtrapolatedBreakdown, days int, targetMergeTime time.Duration, cfg cost.Config) { - targetHours := targetMergeTime.Hours() +func printExtrapolatedMergeTimeModelingCallout(ext *cost.ExtrapolatedBreakdown, days int, cfg cost.Config) { + targetHours := cfg.TargetMergeTimeHours // Calculate hourly rate hourlyRate := (cfg.AnnualSalary * cfg.BenefitsMultiplier) / cfg.HoursPerYear @@ -686,7 +702,7 @@ func printExtrapolatedMergeTimeModelingCallout(ext *cost.ExtrapolatedBreakdown, // PR tracking: scales with open time remodelPRTrackingPerPR := 0.0 - if targetHours >= 1.0 { // Only track PRs open >= 1 hour + if targetHours >= 1.0 { // Minimal tracking for PRs open >= 1 hour daysOpen := targetHours / 24.0 remodelPRTrackingHours := (cfg.PRTrackingMinutesPerDay / 60.0) * daysOpen remodelPRTrackingPerPR = remodelPRTrackingHours * hourlyRate diff --git a/internal/server/server.go b/internal/server/server.go index 1ba2431..2c72566 100644 --- a/internal/server/server.go +++ b/internal/server/server.go @@ -2348,9 +2348,8 @@ func (s *Server) processOrgSampleWithProgress(ctx context.Context, req *OrgSampl // processPRsInParallel processes PRs in parallel and sends progress updates via SSE. 
// //nolint:revive // line-length/use-waitgroup-go: long function signature acceptable, standard wg pattern -func (s *Server) processPRsInParallel(workCtx, reqCtx context.Context, samples []github.PRSummary, defaultOwner, defaultRepo, token string, cfg cost.Config, writer http.ResponseWriter) ([]cost.Breakdown, map[string]int) { - var breakdowns []cost.Breakdown - aggregatedSeconds := make(map[string]int) +func (s *Server) processPRsInParallel(workCtx, reqCtx context.Context, samples []github.PRSummary, defaultOwner, defaultRepo, token string, cfg cost.Config, writer http.ResponseWriter) (breakdowns []cost.Breakdown, aggregatedSeconds map[string]int) { + aggregatedSeconds = make(map[string]int) var mu sync.Mutex var sseMu sync.Mutex // Protects SSE writes to prevent corrupted chunked encoding diff --git a/internal/server/static/README.md b/internal/server/static/README.md new file mode 100644 index 0000000..5df3e6a --- /dev/null +++ b/internal/server/static/README.md @@ -0,0 +1,38 @@ +# Static Assets Testing + +This directory contains static assets for the prcost web UI, including JavaScript functions that are tested separately. + +## JavaScript Testing + +Key functions are extracted into separate `.js` files for testing purposes: + +- `formatR2RCallout.js` - Renders the Ready-to-Review savings callout +- `formatR2RCallout.test.js` - Tests for the callout rendering + +### Running Tests + +```bash +# Run JavaScript tests only +make test-js + +# Run all tests (Go + JavaScript) +make test +``` + +### Test Coverage + +The JavaScript tests verify: +- Correct rendering of the savings callout HTML +- Proper formatting of dollar amounts ($50K, $2.5M, etc.) +- Presence of key messaging ("Pro-Tip:", "Ready-to-Review", etc.) +- Correct behavior for fast PRs (no callout for ≤1.5 hours, the default target) +- HTML structure and styling + +### Adding New Tests + +When modifying `index.html` JavaScript functions: + +1. Extract the function to a separate `.js` file (if not already extracted) +2. Add tests to the corresponding `.test.js` file +3. Run `make test-js` to verify +4. Commit both the function and test files together diff --git a/internal/server/static/formatR2RCallout.js b/internal/server/static/formatR2RCallout.js new file mode 100644 index 0000000..125fde3 --- /dev/null +++ b/internal/server/static/formatR2RCallout.js @@ -0,0 +1,38 @@ +// Extracted from index.html for testing purposes +function formatR2RCallout(avgOpenHours, r2rSavings, currentEfficiency, modeledEfficiency, targetMergeHours = 1.5) { + // Only show if average merge velocity is > target + if (avgOpenHours <= targetMergeHours) { + return ''; + } + + // Format savings with appropriate precision + let savingsText; + if (r2rSavings >= 1000000) { + savingsText = '$' + (r2rSavings / 1000000).toFixed(1) + 'M'; + } else if (r2rSavings >= 1000) { + savingsText = '$' + (r2rSavings / 1000).toFixed(0) + 'K'; + } else { + savingsText = '$' + r2rSavings.toFixed(0); + } + + const efficiencyDelta = modeledEfficiency - currentEfficiency; + let throughputText = ''; + if (efficiencyDelta > 0) { + throughputText = ' (+' + efficiencyDelta.toFixed(1) + '% throughput)'; + } + + // Format target merge time + let targetText = targetMergeHours.toFixed(1) + 'h'; + + let html = '<div style="border-left: 4px solid #00c853;">';
+ html += 'Pro-Tip: Save ' + savingsText + '/yr in lost development effort by reducing merge times to <' + targetText + ' with '; + html += '<a href="https://codegroove.dev/">Ready-to-Review</a>. '; + html += 'Free for OSS, cheap for everyone else.'; + html += '</div>
'; + return html; +} + +// Export for testing (Node.js) or use globally (browser) +if (typeof module !== 'undefined' && module.exports) { + module.exports = { formatR2RCallout }; +} diff --git a/internal/server/static/formatR2RCallout.test.js b/internal/server/static/formatR2RCallout.test.js new file mode 100644 index 0000000..cf92647 --- /dev/null +++ b/internal/server/static/formatR2RCallout.test.js @@ -0,0 +1,97 @@ +// Simple test for formatR2RCallout function +// Run with: node formatR2RCallout.test.js + +const { formatR2RCallout } = require('./formatR2RCallout.js'); +const assert = require('assert'); + +function test(description, fn) { + try { + fn(); + console.log('✓', description); + } catch (err) { + console.error('✗', description); + console.error(' ', err.message); + process.exit(1); + } +} + +// Test 1: Should return empty string for fast PRs (≤1.5 hours by default) +test('Returns empty for PRs with avgOpenHours <= 1.5 (default)', () => { + const result = formatR2RCallout(0.5, 50000, 60, 70); + assert.strictEqual(result, ''); +}); + +test('Returns empty for PRs with avgOpenHours = 1.5 (default)', () => { + const result = formatR2RCallout(1.5, 50000, 60, 70); + assert.strictEqual(result, ''); +}); + +// Test 2: Should render callout for slow PRs (>1.5 hours by default) +test('Renders callout for PRs with avgOpenHours > 1.5 (default)', () => { + const result = formatR2RCallout(10, 50000, 60, 70); + assert(result.length > 0, 'Should return non-empty HTML'); +}); + +// Test 3: Should contain "Pro-Tip:" text +test('Contains "Pro-Tip:" text', () => { + const result = formatR2RCallout(10, 50000, 60, 70); + assert(result.includes('Pro-Tip:'), 'Should contain "Pro-Tip:"'); +}); + +// Test 4: Should contain "Ready-to-Review" link +test('Contains "Ready-to-Review" link', () => { + const result = formatR2RCallout(10, 50000, 60, 70); + assert(result.includes('Ready-to-Review'), 'Should contain "Ready-to-Review"'); + assert(result.includes('href="https://codegroove.dev/"'), 'Should link to codegroove.dev'); +}); + +// Test 5: Should contain OSS pricing message +test('Contains OSS pricing message', () => { + const result = formatR2RCallout(10, 50000, 60, 70); + assert(result.includes('Free for OSS, cheap for everyone else'), 'Should contain OSS pricing message'); +}); + +// Test 6: Should format savings in thousands (K) +test('Formats savings with K suffix for thousands', () => { + const result = formatR2RCallout(10, 50000, 60, 70); + assert(result.includes('$50K/yr'), 'Should format $50,000 as $50K/yr'); +}); + +// Test 7: Should format savings in millions (M) +test('Formats savings with M suffix for millions', () => { + const result = formatR2RCallout(10, 2500000, 60, 70); + assert(result.includes('$2.5M/yr'), 'Should format $2,500,000 as $2.5M/yr'); +}); + +// Test 8: Should format small savings without suffix +test('Formats small savings without suffix', () => { + const result = formatR2RCallout(10, 500, 60, 70); + assert(result.includes('$500/yr'), 'Should format $500 as $500/yr'); +}); + +// Test 9: Should contain "reducing merge times to <1.5h" (default) +test('Contains merge time reduction message (default 1.5h)', () => { + const result = formatR2RCallout(10, 50000, 60, 70); + assert(result.includes('reducing merge times to <1.5h'), 'Should mention reducing merge times to <1.5h'); +}); + +// Test 9b: Should use custom target merge time when provided +test('Uses custom target merge time when provided', () => { + const result = formatR2RCallout(10, 50000, 60, 70, 2.0); + 
assert(result.includes('reducing merge times to <2.0h'), 'Should mention reducing merge times to <2.0h'); +}); + +// Test 10: Should contain proper HTML structure +test('Contains proper HTML div wrapper', () => { + const result = formatR2RCallout(10, 50000, 60, 70); + assert(result.startsWith('<div'), 'Should start with <div'); + assert(result.endsWith('</div>'), 'Should end with </div>'); +}); + +// Test 11: Should use green color scheme +test('Uses green color scheme', () => { + const result = formatR2RCallout(10, 50000, 60, 70); + assert(result.includes('#00c853'), 'Should include green color #00c853'); +}); + +console.log('\nAll tests passed! ✓'); diff --git a/internal/server/static/index.html b/internal/server/static/index.html index 7c6dacd..d0e4303 100644 --- a/internal/server/static/index.html +++ b/internal/server/static/index.html @@ -1480,9 +1480,9 @@


} let html = '
<div style="border-left: 4px solid #00c853;">'; - html += '✓ You\'re losing ' + savingsText + '/yr' + throughputText + ' to code review lag. '; - html += 'Ready-to-Review fixes it: <40min merges, free for OSS. '; - html += '<a href="mailto:go-faster@codeGROOVE.dev">go-faster@codeGROOVE.dev</a>'; + html += 'Pro-Tip: Save ' + savingsText + '/yr in lost development effort by reducing merge times to <1.5h with '; + html += '<a href="https://codegroove.dev/">Ready-to-Review</a>. '; + html += 'Free for OSS, cheap for everyone else.'; + html += '</div>
'; return html; } @@ -1510,7 +1510,7 @@


} let html = '
<div>'; - html += '💡 Merge Time Modeling: If you lowered your average merge time to 1h, you would save ~' + savingsText + '/yr in engineering overhead' + throughputText + '.'; + html += '💡 Merge Time Modeling: If you lowered your average merge time to 1.5h, you would save ~' + savingsText + '/yr in engineering overhead' + throughputText + '.'; html += '</div>
'; return html; } @@ -1592,16 +1592,26 @@


output += ' ───────────\n'; const cappedLabel = b.delay_capped ? ' (capped)' : ''; output += ` Workstream blockage ${formatCurrency(b.delay_cost_detail.delivery_delay_cost).padStart(12)} ${formatTimeUnit(b.delay_cost_detail.delivery_delay_hours)}${cappedLabel}\n`; - const mergeDelayCost = b.delay_cost_detail.delivery_delay_cost + b.delay_cost_detail.code_churn_cost + b.delay_cost_detail.automated_updates_cost + b.delay_cost_detail.pr_tracking_cost; - const mergeDelayHours = b.delay_cost_detail.delivery_delay_hours + b.delay_cost_detail.code_churn_hours + b.delay_cost_detail.automated_updates_hours + b.delay_cost_detail.pr_tracking_hours; + const mergeDelayCost = b.delay_cost_detail.delivery_delay_cost + b.delay_cost_detail.automated_updates_cost + b.delay_cost_detail.pr_tracking_cost; + const mergeDelayHours = b.delay_cost_detail.delivery_delay_hours + b.delay_cost_detail.automated_updates_hours + b.delay_cost_detail.pr_tracking_hours; output += ' ────────────\n'; pct = (mergeDelayCost / b.total_cost) * 100; output += formatSubtotalLine("Subtotal", mergeDelayCost, formatTimeUnit(mergeDelayHours), `(${pct.toFixed(1)}%)`); output += '\n'; + // Preventable Future Costs + if (b.delay_cost_detail.rework_percentage > 0) { + output += ' Preventable Future Costs\n'; + output += ' ────────────────────────\n'; + output += ` Rework due to churn (${Math.round(b.delay_cost_detail.rework_percentage)}% drift) ${formatCurrency(b.delay_cost_detail.code_churn_cost).padStart(12)} ${formatTimeUnit(b.delay_cost_detail.code_churn_hours)}\n`; + output += ' ────────────\n'; + pct = (b.delay_cost_detail.code_churn_cost / b.total_cost) * 100; + output += formatSubtotalLine("Subtotal", b.delay_cost_detail.code_churn_cost, formatTimeUnit(b.delay_cost_detail.code_churn_hours), `(${pct.toFixed(1)}%)`); + output += '\n'; + } + // Future Costs - const hasFuture = b.delay_cost_detail.rework_percentage > 0 || - b.delay_cost_detail.future_review_cost > 0 || + const hasFuture = b.delay_cost_detail.future_review_cost > 0 || b.delay_cost_detail.future_merge_cost > 0 || b.delay_cost_detail.future_context_cost > 0; @@ -1609,9 +1619,6 @@


output += ' Future Costs\n'; output += ' ────────────\n'; - if (b.delay_cost_detail.rework_percentage > 0) { - output += ` Code Churn (${Math.round(b.delay_cost_detail.rework_percentage)}% drift) ${formatCurrency(b.delay_cost_detail.code_churn_cost).padStart(12)} ${formatTimeUnit(b.delay_cost_detail.code_churn_hours)}\n`; - } if (b.delay_cost_detail.future_review_cost > 0) { output += ` Review ${formatCurrency(b.delay_cost_detail.future_review_cost).padStart(12)} ${formatTimeUnit(b.delay_cost_detail.future_review_hours)}\n`; } @@ -1622,9 +1629,9 @@


output += ` Context Switching ${formatCurrency(b.delay_cost_detail.future_context_cost).padStart(12)} ${formatTimeUnit(b.delay_cost_detail.future_context_hours)}\n`; } - const futureCost = b.delay_cost_detail.code_churn_cost + b.delay_cost_detail.future_review_cost + + const futureCost = b.delay_cost_detail.future_review_cost + b.delay_cost_detail.future_merge_cost + b.delay_cost_detail.future_context_cost; - const futureHours = b.delay_cost_detail.code_churn_hours + b.delay_cost_detail.future_review_hours + + const futureHours = b.delay_cost_detail.future_review_hours + b.delay_cost_detail.future_merge_hours + b.delay_cost_detail.future_context_hours; output += ' ────────────\n'; pct = (futureCost / b.total_cost) * 100; @@ -1840,23 +1847,31 @@


if (avgPRTrackingCost > 0.01) { output += formatItemLine("PR Tracking", avgPRTrackingCost, formatTimeUnit(avgPRTrackingHours), `(${e.open_prs} open PRs)`); } - const avgMergeDelayCost = avgDeliveryDelayCost + avgCodeChurnCost + avgAutomatedUpdatesCost + avgPRTrackingCost; - const avgMergeDelayHours = avgDeliveryDelayHours + avgCodeChurnHours + avgAutomatedUpdatesHours + avgPRTrackingHours; + const avgMergeDelayCost = avgDeliveryDelayCost + avgAutomatedUpdatesCost + avgPRTrackingCost; + const avgMergeDelayHours = avgDeliveryDelayHours + avgAutomatedUpdatesHours + avgPRTrackingHours; output += ' ──────────\n'; pct = (avgMergeDelayCost / avgTotalCost) * 100; output += formatSubtotalLine("Subtotal", avgMergeDelayCost, formatTimeUnit(avgMergeDelayHours), `(${pct.toFixed(1)}%)`); output += '\n'; + // Preventable Future Costs + if (e.code_churn_cost > 0.01) { + output += ' Preventable Future Costs\n'; + output += ' ────────────────────────\n'; + const avgReworkPct = e.avg_rework_percentage || 0; + const label = avgReworkPct > 0 ? `Rework due to churn (${avgReworkPct.toFixed(0)}% drift)` : 'Rework due to churn'; + output += formatItemLine(label, avgCodeChurnCost, formatTimeUnit(avgCodeChurnHours), `(${e.code_churn_pr_count} PRs)`); + output += ' ──────────\n'; + pct = (avgCodeChurnCost / avgTotalCost) * 100; + output += formatSubtotalLine("Subtotal", avgCodeChurnCost, formatTimeUnit(avgCodeChurnHours), `(${pct.toFixed(1)}%)`); + output += '\n'; + } + // Future Costs - const hasFuture = e.code_churn_cost > 0.01 || e.future_review_cost > 0.01 || e.future_merge_cost > 0.01 || e.future_context_cost > 0.01; + const hasFuture = e.future_review_cost > 0.01 || e.future_merge_cost > 0.01 || e.future_context_cost > 0.01; if (hasFuture) { output += ' Future Costs\n'; output += ' ────────────\n'; - if (e.code_churn_cost > 0.01) { - const avgReworkPct = e.avg_rework_percentage || 0; - const label = avgReworkPct > 0 ? `Code Churn (${avgReworkPct.toFixed(0)}% drift)` : 'Code Churn'; - output += formatItemLine(label, avgCodeChurnCost, formatTimeUnit(avgCodeChurnHours), `(${e.code_churn_pr_count} PRs)`); - } if (e.future_review_cost > 0.01) { output += formatItemLine("Review", avgFutureReviewCost, formatTimeUnit(avgFutureReviewHours), `(${e.future_review_pr_count} PRs)`); } @@ -1867,8 +1882,8 @@


const avgFutureContextSessions = e.future_context_sessions / totalPRs; output += formatItemLine("Context Switching", avgFutureContextCost, formatTimeUnit(avgFutureContextHours), `(${avgFutureContextSessions.toFixed(1)} sessions)`); } - const avgFutureCost = avgCodeChurnCost + avgFutureReviewCost + avgFutureMergeCost + avgFutureContextCost; - const avgFutureHours = avgCodeChurnHours + avgFutureReviewHours + avgFutureMergeHours + avgFutureContextHours; + const avgFutureCost = avgFutureReviewCost + avgFutureMergeCost + avgFutureContextCost; + const avgFutureHours = avgFutureReviewHours + avgFutureMergeHours + avgFutureContextHours; output += ' ──────────\n'; pct = (avgFutureCost / avgTotalCost) * 100; output += formatSubtotalLine("Subtotal", avgFutureCost, formatTimeUnit(avgFutureHours), `(${pct.toFixed(1)}%)`); @@ -1951,21 +1966,29 @@


output += formatItemLine("PR Tracking", e.pr_tracking_cost, formatTimeUnit(e.pr_tracking_hours), `(${e.open_prs || 0} open PRs)`); } - const mergeDelayCost = (e.delivery_delay_cost || 0) + (e.code_churn_cost || 0) + (e.automated_updates_cost || 0) + (e.pr_tracking_cost || 0); - const mergeDelayHours = (e.delivery_delay_hours || 0) + (e.code_churn_hours || 0) + (e.automated_updates_hours || 0) + (e.pr_tracking_hours || 0); + const mergeDelayCost = (e.delivery_delay_cost || 0) + (e.automated_updates_cost || 0) + (e.pr_tracking_cost || 0); + const mergeDelayHours = (e.delivery_delay_hours || 0) + (e.automated_updates_hours || 0) + (e.pr_tracking_hours || 0); output += ' ──────────\n'; pct = (mergeDelayCost / e.total_cost) * 100; output += formatSubtotalLine("Subtotal", mergeDelayCost, formatTimeUnit(mergeDelayHours), `(${pct.toFixed(1)}%)`); output += '\n'; + // Preventable Future Costs + if ((e.code_churn_cost || 0) > 0.01) { + output += ' Preventable Future Costs\n'; + output += ' ────────────────────────\n'; + output += formatItemLine("Rework due to churn", e.code_churn_cost, formatTimeUnit(e.code_churn_hours), `(${e.code_churn_pr_count || 0} PRs)`); + output += ' ──────────\n'; + pct = (e.code_churn_cost / e.total_cost) * 100; + output += formatSubtotalLine("Subtotal", e.code_churn_cost, formatTimeUnit(e.code_churn_hours), `(${pct.toFixed(1)}%)`); + output += '\n'; + } + // Future Costs - const hasFuture = (e.code_churn_cost || 0) > 0.01 || (e.future_review_cost || 0) > 0.01 || (e.future_merge_cost || 0) > 0.01 || (e.future_context_cost || 0) > 0.01; + const hasFuture = (e.future_review_cost || 0) > 0.01 || (e.future_merge_cost || 0) > 0.01 || (e.future_context_cost || 0) > 0.01; if (hasFuture) { output += ' Future Costs\n'; output += ' ────────────\n'; - if ((e.code_churn_cost || 0) > 0.01) { - output += formatItemLine("Code Churn", e.code_churn_cost, formatTimeUnit(e.code_churn_hours), ""); - } if ((e.future_review_cost || 0) > 0.01) { const openPRs = e.open_prs || 0; output += formatItemLine("Review", e.future_review_cost, formatTimeUnit(e.future_review_hours), `(${openPRs} PRs)`); @@ -1977,8 +2000,8 @@


if ((e.future_context_cost || 0) > 0.01) { output += formatItemLine("Context Switching", e.future_context_cost, formatTimeUnit(e.future_context_hours), `(${e.future_context_sessions || 0} sessions)`); } - const futureCost = (e.code_churn_cost || 0) + (e.future_review_cost || 0) + (e.future_merge_cost || 0) + (e.future_context_cost || 0); - const futureHours = (e.code_churn_hours || 0) + (e.future_review_hours || 0) + (e.future_merge_hours || 0) + (e.future_context_hours || 0); + const futureCost = (e.future_review_cost || 0) + (e.future_merge_cost || 0) + (e.future_context_cost || 0); + const futureHours = (e.future_review_hours || 0) + (e.future_merge_hours || 0) + (e.future_context_hours || 0); output += ' ──────────\n'; pct = (futureCost / e.total_cost) * 100; output += formatSubtotalLine("Subtotal", futureCost, formatTimeUnit(futureHours), `(${pct.toFixed(1)}%)`); @@ -2248,8 +2271,8 @@


html += formatEfficiencyHTML(extEfficiencyPct, extEfficiency.grade, extEfficiency.message, extPreventableCost, extPreventableHours, e.total_cost, e.total_hours, avgPRDurationHours, true, annualWasteCost, annualWasteHours, wasteHoursPerWeek, wasteCostPerWeek, wasteHoursPerAuthorPerWeek, wasteCostPerAuthorPerWeek, totalAuthors, salary, benefitsMultiplier, analysisType, sourceName); // Add R2R callout if enabled, otherwise generic merge time callout - // Calculate modeled efficiency (with 40min/1h merge time) - const targetMergeHours = 40 / 60; // 40 minutes in hours + // Calculate modeled efficiency (with 1.5h merge time) + const targetMergeHours = 1.5; // 1.5 hours (90 minutes) const hourlyRate = (salary * benefitsMultiplier) / 2080; // Remodel preventable costs with target merge time @@ -2257,7 +2280,7 @@


const remodelDeliveryDelayPerPR = hourlyRate * deliveryDelayFactor * targetMergeHours; const remodelCodeChurnPerPR = 0; // < 1 day, no churn const remodelAutomatedUpdatesPerPR = 0; // < 1 day, no automated updates - const remodelPRTrackingPerPR = 0; // < 1 hour, no tracking + const remodelPRTrackingPerPR = 0; // < 2 hours, minimal tracking const totalPRs = e.total_prs; const remodelPreventableCost = (remodelDeliveryDelayPerPR + remodelCodeChurnPerPR + diff --git a/pkg/cost/cost.go b/pkg/cost/cost.go index eac5456..292c654 100644 --- a/pkg/cost/cost.go +++ b/pkg/cost/cost.go @@ -86,6 +86,34 @@ type Config struct { // Modification is cheaper because architecture is established and patterns are known. ModificationCostFactor float64 + // WeeklyChurnRate is the probability that code becomes stale per week (default: 0.0229 = 2.29%) + // Used to calculate rework percentage for open PRs based on time since last commit. + // Formula: rework = 1 - (1 - weekly_rate)^weeks + // + // Default of 2.29% per week is based on empirical analysis across organizations: + // - 60% of analyzed organizations had churn rates of 2.29%/week or lower + // - 40% had higher churn rates + // - Younger companies tend to have higher churn rates + // - Results in 70% annual churn, reasonable for active development + // + // Examples from empirical data: + // - 0.0018 (0.18%/week) - Adobe (mature, stable codebase) + // - 0.0229 (2.29%/week) - 60th percentile (default) + // - 0.0831 (8.31%/week) - Chainguard (young company, fast-moving) + // + // Recommended values for different project types: + // - 0.010 (1.0%/week) → 41% annual churn - stable projects, mature codebases + // - 0.0229 (2.29%/week) → 70% annual churn - typical active development (default, 60th percentile) + // - 0.030 (3.0%/week) → 78% annual churn - fast-moving projects + // - 0.040 (4.0%/week) → 88% annual churn - very high churn + // - 0.080+ (8%+/week) → 99%+ annual churn - extremely fast-moving, younger companies + WeeklyChurnRate float64 + + // TargetMergeTimeHours is the target merge time in hours for efficiency modeling (default: 1.5 hours / 90 minutes) + // Used to calculate potential savings if merge times were reduced to this target. + // This represents a realistic goal for well-optimized PR workflows. + TargetMergeTimeHours float64 + // COCOMO configuration for estimating code writing effort COCOMO cocomo.Config } @@ -108,6 +136,8 @@ func DefaultConfig() Config { MaxCodeDrift: 90 * 24 * time.Hour, // 90 days ReviewInspectionRate: 275.0, // 275 LOC/hour (average of optimal 150-400 range) ModificationCostFactor: 0.4, // Modified code costs 40% of new code + WeeklyChurnRate: 0.0229, // 2.29% per week (70% annual, 60th percentile empirical) + TargetMergeTimeHours: 1.5, // 1.5 hours (90 minutes) target for efficiency modeling COCOMO: cocomo.DefaultConfig(), } } @@ -347,29 +377,21 @@ func Calculate(data PRData, cfg Config) Breakdown { // 2. Code Churn (Rework): Probability-based drift formula // Only calculated for open PRs - closed PRs won't need future updates // - // Research basis: - // - Windows Vista: 4-8% weekly code churn (Nagappan et al., Microsoft Research, 2008) - // - Using 4% weekly baseline for active repositories - // // Formula: Probability that a line becomes stale over time // drift = 1 - (1 - weeklyChurn)^(weeks) - // drift = 1 - (0.96)^(days/7) + // Default: weeklyChurn = 2.29% (0.0229) - empirical 60th percentile // // This models the cumulative probability that any given line in the PR needs rework - // due to codebase changes. 
Unlike compounding formulas, this accounts for the fact - that the same code areas often change multiple times. - // - // Expected drift percentages: - // - 3 days: ~2% drift - // - 7 days: ~4% drift (matches weekly churn) - // - 14 days: ~8% drift - // - 30 days: ~16% drift - // - 60 days: ~29% drift - // - 90 days: ~41% drift (days capped at 90) + // due to codebase changes. The weekly churn rate is configurable to match project velocity. // - // Reference: - // Nagappan, N., Murphy, B., & Basili, V. (2008). The Influence of Organizational - // Structure on Software Quality. ACM/IEEE ICSE. DOI: 10.1145/1368088.1368160 + // Default (2.29% per week) drift percentages: + // - 1 week: ~2.3% drift + // - 2 weeks: ~4.5% drift + // - 3 weeks: ~6.7% drift + // - 1 month: ~8.9% drift + // - 2 months: ~16.9% drift + // - 3 months: ~24.3% drift + // - 1 year: ~70% annual churn (empirical data from org analysis) var reworkLOC int var codeChurnHours float64 @@ -414,9 +436,11 @@ func Calculate(data PRData, cfg Config) Breakdown { cappedDriftDays = maxDriftDays } - // Probability-based drift: 1 - (1 - 0.04)^(days/7) + // Probability-based drift using configurable weekly churn rate + // Formula: rework = 1 - (1 - weekly_rate)^weeks + // Default: 2.29% per week → 70% annual churn weeks := cappedDriftDays / 7.0 - reworkPercentage = 1.0 - math.Pow(0.96, weeks) + reworkPercentage = 1.0 - math.Pow(1.0-cfg.WeeklyChurnRate, weeks) reworkLOC = int(float64(data.LinesAdded) * reworkPercentage) diff --git a/pkg/cost/cost_test.go b/pkg/cost/cost_test.go index 2ca4c55..377dc47 100644 --- a/pkg/cost/cost_test.go +++ b/pkg/cost/cost_test.go @@ -187,6 +187,8 @@ func TestCalculateWithRealPRData(t *testing.T) { } `json:"events"` PullRequest struct { CreatedAt string `json:"created_at"` + ClosedAt string `json:"closed_at"` + MergedAt string `json:"merged_at"` Author string `json:"author"` Additions int `json:"additions"` AuthorWriteAccess int `json:"author_write_access"` @@ -219,11 +221,20 @@ func TestCalculateWithRealPRData(t *testing.T) { t.Fatalf("Failed to parse created_at: %v", err) } + var closedAt time.Time + if prxData.PullRequest.ClosedAt != "" { + closedAt, err = time.Parse(time.RFC3339, prxData.PullRequest.ClosedAt) + if err != nil { + t.Fatalf("Failed to parse closed_at: %v", err) + } + } + prData := PRData{ LinesAdded: prxData.PullRequest.Additions, Author: prxData.PullRequest.Author, Events: events, CreatedAt: createdAt, + ClosedAt: closedAt, } cfg := DefaultConfig() @@ -1526,7 +1537,7 @@ func TestExtrapolateFromSamplesR2RSavings(t *testing.T) { } // For a 3-day PR, there should be significant savings - // (R2R targets 40-minute PRs, which would eliminate most delay costs) + // (R2R targets 1.5-hour PRs, which would eliminate most delay costs) if result.R2RSavings == 0 { t.Error("Expected positive R2R savings for long-duration PRs") } diff --git a/pkg/cost/extrapolate.go b/pkg/cost/extrapolate.go index c836e46..69f6e31 100644 --- a/pkg/cost/extrapolate.go +++ b/pkg/cost/extrapolate.go @@ -96,7 +96,7 @@ type ExtrapolatedBreakdown struct { // R2R cost savings calculation UniqueNonBotUsers int `json:"unique_non_bot_users"` // Count of unique non-bot users (authors + participants) - R2RSavings float64 `json:"r2r_savings"` // Annual savings if R2R cuts PR time to 40 minutes + R2RSavings float64 `json:"r2r_savings"` // Annual savings if R2R cuts PR time to target merge time } // ExtrapolateFromSamples calculates extrapolated cost estimates from a sample @@ -324,7 +324,11 @@ func ExtrapolateFromSamples(breakdowns
[]Breakdown, totalPRs, totalAuthors, actu avgReworkPercentage = sumReworkPercentage / float64(countCodeChurn) } - extTotalCost := sumTotalCost / samples * multiplier + // Calculate total cost by summing components + // Note: We recalculate this instead of using sumTotalCost because PR tracking cost + // is computed org-wide (actualOpenPRs × uniqueUsers) rather than extrapolated from samples + extTotalCost := extAuthorTotal + extParticipantCost + extDeliveryDelayCost + extCodeChurnCost + + extAutomatedUpdatesCost + extPRTrackingCost + extFutureReviewCost + extFutureMergeCost + extFutureContextCost extTotalHours := extAuthorHours + extParticipantHours + extDelayHours // Calculate waste per week metrics @@ -385,11 +389,11 @@ func ExtrapolateFromSamples(breakdowns []Breakdown, totalPRs, totalAuthors, actu preventableCost := extCodeChurnCost + extDeliveryDelayCost + extAutomatedUpdatesCost + extPRTrackingCost baselineAnnualWaste := preventableCost * (52.0 / (float64(daysInPeriod) / 7.0)) - // Re-model with 40-minute PR merge times - // We need to recalculate delivery delay and future costs assuming all PRs take 40 minutes (2/3 hour) - const targetMergeTimeHours = 40.0 / 60.0 // 40 minutes in hours + // Re-model with target PR merge time from config + // We need to recalculate delivery delay and future costs assuming all PRs take the target merge time + targetMergeTimeHours := cfg.TargetMergeTimeHours - // Recalculate delivery delay cost with 40-minute PRs + // Recalculate delivery delay cost with target merge time PRs // Delivery delay formula: hourlyRate × deliveryDelayFactor × PR duration var remodelDeliveryDelayCost float64 for range breakdowns { @@ -397,19 +401,19 @@ func ExtrapolateFromSamples(breakdowns []Breakdown, totalPRs, totalAuthors, actu } extRemodelDeliveryDelayCost := remodelDeliveryDelayCost / samples * multiplier - // Recalculate code churn with 40-minute PRs + // Recalculate code churn with target merge time PRs // Code churn is proportional to PR duration (rework percentage increases with time) - // For 40 minutes, rework percentage would be minimal (< 1 day, so ~0%) - extRemodelCodeChurnCost := 0.0 // 40 minutes is too short for meaningful code churn + // For target merge times < 1 day, rework percentage would be minimal (~0%) + extRemodelCodeChurnCost := 0.0 // Target merge time is too short for meaningful code churn // Recalculate automated updates cost // Automated updates are calculated based on PR duration - // With 40-minute PRs, no bot updates would be needed (happens after 1 day) - extRemodelAutomatedUpdatesCost := 0.0 // 40 minutes is too short for automated updates + // With target merge time PRs, no bot updates would be needed (happens after 1 day) + extRemodelAutomatedUpdatesCost := 0.0 // Target merge time is too short for automated updates // Recalculate PR tracking cost // With faster merge times, we'd have fewer open PRs at any given time - // Estimate: if current avg is X hours, and we reduce to 40 min, open PRs would be (40min / X hours) of current + // Estimate: if current avg is X hours, and we reduce to target, open PRs would be (target / X hours) of current var extRemodelPRTrackingCost float64 var currentAvgOpenTime float64 if successfulSamples > 0 {
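The drift table in the new `WeeklyChurnRate` doc comment follows directly from `rework = 1 - (1 - weeklyRate)^weeks`. A small standalone sketch that reproduces the documented percentages with the 2.29% default:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	const weeklyRate = 0.0229 // WeeklyChurnRate default
	spans := []struct {
		label string
		weeks float64
	}{
		{"1 week", 1}, {"2 weeks", 2}, {"1 month", 4},
		{"3 months", 12}, {"1 year", 52},
	}
	for _, s := range spans {
		// Cumulative probability that a given line has gone stale.
		drift := 1.0 - math.Pow(1.0-weeklyRate, s.weeks)
		fmt.Printf("%-9s %5.1f%%\n", s.label, drift*100)
	}
	// Prints ~2.3%, ~4.5%, ~8.9%, ~24.3%, and ~70.0% — matching the table.
}
```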
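Likewise, the `baselineAnnualWaste` step in `ExtrapolateFromSamples` is plain proportional scaling of the observed window up to 52 weeks. A toy check, assuming a hypothetical $12,000 of preventable waste over the default 60-day lookback:

```go
package main

import "fmt"

func main() {
	preventableCost := 12000.0 // assumed: observed preventable waste in the window
	daysInPeriod := 60.0       // the CLI's default -days lookback

	weeksInPeriod := daysInPeriod / 7.0
	baselineAnnualWaste := preventableCost * (52.0 / weeksInPeriod)

	fmt.Printf("baseline annual waste: $%.0f\n", baselineAnnualWaste) // ≈ $72,800
}
```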