diff --git a/cmd/server/main.go b/cmd/server/main.go
index cb1fad7..fb9cd14 100644
--- a/cmd/server/main.go
+++ b/cmd/server/main.go
@@ -9,6 +9,7 @@ import (
"net/http"
"os"
"os/signal"
+ "path/filepath"
"runtime"
"syscall"
"time"
@@ -36,10 +37,19 @@ func main() {
// Create root context
ctx := context.Background()
- // Set up logging
+ // Set up logging with short source paths
logger := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
AddSource: true,
Level: slog.LevelInfo,
+ ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr {
+ // Shorten source file paths to show only filename:line
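+ // e.g. "/app/cmd/server/main.go" is logged as just "main.go"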
+ if a.Key == slog.SourceKey {
+ if src, ok := a.Value.Any().(*slog.Source); ok {
+ src.File = filepath.Base(src.File)
+ }
+ }
+ return a
+ },
}))
slog.SetDefault(logger)
diff --git a/internal/server/server.go b/internal/server/server.go
index be138cf..736fb80 100644
--- a/internal/server/server.go
+++ b/internal/server/server.go
@@ -196,8 +196,11 @@ func New() *Server {
logger.InfoContext(ctx, "No fallback token available - requests must provide Authorization header")
}
- // Start cache cleanup goroutine.
- go server.cleanupCachesPeriodically()
+ // Note: We don't clear caches periodically because:
+ // - PR data is immutable (closed PRs don't change)
+ // - Memory usage is bounded by request patterns
+ // - Cloud Run instances are ephemeral and restart frequently anyway
+ // If needed in the future, implement LRU eviction with size limits instead of time-based clearing.
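+ // A rough sketch of that fallback (hypothetical maxCacheEntries; a simple size cap rather than a true LRU):
+ //
+ //	if len(s.prDataCache) >= maxCacheEntries {
+ //		for key := range s.prDataCache {
+ //			delete(s.prDataCache, key) // evicts an arbitrary entry; a real LRU would track recency
+ //			break
+ //		}
+ //	}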
return server
}
@@ -304,36 +307,6 @@ func (s *Server) limiter(ctx context.Context, ip string) *rate.Limiter {
return limiter
}
-// cleanupCachesPeriodically clears all caches every 30 minutes to prevent unbounded growth.
-// Cloud Run instances are ephemeral, so no complex TTL logic is needed.
-func (s *Server) cleanupCachesPeriodically() {
- ticker := time.NewTicker(30 * time.Minute)
- defer ticker.Stop()
-
- for range ticker.C {
- s.clearCache(&s.prQueryCacheMu, s.prQueryCache, "pr_query")
- s.clearCache(&s.prDataCacheMu, s.prDataCache, "pr_data")
- }
-}
-
-// clearCache removes all entries from a cache.
-func (s *Server) clearCache(mu *sync.RWMutex, cache map[string]*cacheEntry, name string) {
- mu.Lock()
- defer mu.Unlock()
-
- count := len(cache)
- // Clear map by creating new map
- for key := range cache {
- delete(cache, key)
- }
-
- if count > 0 {
- s.logger.Info("Cleared cache",
- "cache", name,
- "cleared", count)
- }
-}
-
// cachedPRQuery retrieves cached PR query results.
func (s *Server) cachedPRQuery(key string) ([]github.PRSummary, bool) {
s.prQueryCacheMu.RLock()
diff --git a/internal/server/static/index.html b/internal/server/static/index.html
index 7aef1ab..6d47638 100644
--- a/internal/server/static/index.html
+++ b/internal/server/static/index.html
@@ -1412,15 +1412,26 @@ Why calculate PR costs?
return html;
}
- function formatR2RCallout(avgOpenHours) {
+ function formatR2RCallout(avgOpenHours, r2rSavings) {
// Only show the callout if the average time-to-merge exceeds 1 hour
if (avgOpenHours <= 1) {
return '';
}
+ // Format savings with appropriate precision
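+ // e.g. 2400000 -> "$2.4M", 56000 -> "$56K", 850 -> "$850"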
+ let savingsText;
+ if (r2rSavings >= 1000000) {
+ savingsText = '$' + (r2rSavings / 1000000).toFixed(1) + 'M';
+ } else if (r2rSavings >= 1000) {
+ savingsText = '$' + (r2rSavings / 1000).toFixed(0) + 'K';
+ } else {
+ savingsText = '$' + r2rSavings.toFixed(0);
+ }
+
let html = '';
- html += '✓ Ready-to-Review: $4/mo to cut merge latency to ≤40 min';
- html += 'Stop losing engineering hours to code review lag. Free for OSS projects. Let\'s chat: go-faster@codeGROOVE.dev';
+ html += '✓ Based on this calculation, Ready-to-Review would save you ~' + savingsText + '/yr by cutting average merge time to ≤40 min. ';
+ html += 'Stop losing engineering hours to code review lag. Free for OSS projects. ';
+ html += 'Let\'s chat: go-faster@codeGROOVE.dev';
html += '';
return html;
}
@@ -2025,7 +2036,8 @@ Why calculate PR costs?
// Add R2R callout if enabled and average time-to-merge exceeds 1 hour
if (data.r2r_callout) {
- html += formatR2RCallout(avgPRDurationHours);
+ const r2rSavings = e.r2r_savings || 0;
+ html += formatR2RCallout(avgPRDurationHours, r2rSavings);
}
// Calculate average PR efficiency
diff --git a/pkg/cost/extrapolate.go b/pkg/cost/extrapolate.go
index 0620731..0ddc1e9 100644
--- a/pkg/cost/extrapolate.go
+++ b/pkg/cost/extrapolate.go
@@ -82,6 +82,10 @@ type ExtrapolatedBreakdown struct {
// Grand totals
TotalCost float64 `json:"total_cost"`
TotalHours float64 `json:"total_hours"`
+
+ // R2R cost savings calculation
+ UniqueNonBotUsers int `json:"unique_non_bot_users"` // Count of unique non-bot users (authors + participants)
+ R2RSavings float64 `json:"r2r_savings"` // Annual savings if R2R cuts PR time to 40 minutes
}
// ExtrapolateFromSamples calculates extrapolated cost estimates from a sample
@@ -113,6 +117,8 @@ func ExtrapolateFromSamples(breakdowns []Breakdown, totalPRs, totalAuthors, actu
// Track unique PR authors (excluding bots)
uniqueAuthors := make(map[string]bool)
+ // Track unique non-bot users (authors + participants)
+ uniqueNonBotUsers := make(map[string]bool)
// Track bot vs human PR metrics
var humanPRCount, botPRCount int
@@ -140,6 +146,7 @@ func ExtrapolateFromSamples(breakdowns []Breakdown, totalPRs, totalAuthors, actu
// Track unique PR authors only (excluding bots)
if !breakdown.AuthorBot {
uniqueAuthors[breakdown.PRAuthor] = true
+ uniqueNonBotUsers[breakdown.PRAuthor] = true
humanPRCount++
sumHumanPRDuration += breakdown.PRDuration
} else {
@@ -150,6 +157,12 @@ func ExtrapolateFromSamples(breakdowns []Breakdown, totalPRs, totalAuthors, actu
sumBotModifiedLines += breakdown.Author.ModifiedLines
}
+ // Track unique participants (excluding bots)
+ for _, p := range breakdown.Participants {
+ // Participants from the Breakdown struct are already filtered to exclude bots
+ uniqueNonBotUsers[p.Actor] = true
+ }
+
// Accumulate PR duration (all PRs)
sumPRDuration += breakdown.PRDuration
@@ -322,6 +335,62 @@ func ExtrapolateFromSamples(breakdowns []Breakdown, totalPRs, totalAuthors, actu
extHumanPRs := int(float64(humanPRCount) / samples * multiplier)
extBotPRs := int(float64(botPRCount) / samples * multiplier)
+ // Calculate R2R savings
+ // Formula: baseline annual waste - (re-modeled waste with 40min PRs) - (R2R subscription cost)
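+ // e.g. $120K baseline - $8K re-modeled - (12 users × $48/yr = $576) ≈ $111K/yr in savings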
+ // Baseline annual waste: preventable cost extrapolated to 52 weeks
+ uniqueUserCount := len(uniqueNonBotUsers)
+ baselineAnnualWaste := (extCodeChurnCost + extDeliveryDelayCost + extAutomatedUpdatesCost + extPRTrackingCost) * (52.0 / (float64(daysInPeriod) / 7.0))
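+ // 52.0 / (daysInPeriod / 7.0) annualizes the sampled window, e.g. a 30-day sample scales by 52/(30/7) ≈ 12.1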
+
+ // Re-model with 40-minute PR merge times
+ // We need to recalculate delivery delay and future costs assuming all PRs take 40 minutes (2/3 hour)
+ const targetMergeTimeHours = 40.0 / 60.0 // 40 minutes in hours
+
+ // Recalculate delivery delay cost with 40-minute PRs
+ // Delivery delay formula: hourlyRate × deliveryDelayFactor × PR duration
+ remodelDeliveryDelayCost := hourlyRate * cfg.DeliveryDelayFactor * targetMergeTimeHours * float64(len(breakdowns))
+ extRemodelDeliveryDelayCost := remodelDeliveryDelayCost / samples * multiplier
+
+ // Recalculate code churn with 40-minute PRs
+ // Code churn is proportional to PR duration (rework percentage increases with time),
+ // and 40 minutes is far below the one-day mark where rework starts to register
+ extRemodelCodeChurnCost := 0.0 // 40 minutes is too short for meaningful code churn
+
+ // Recalculate automated updates cost
+ // Automated update costs only accrue after a PR has been open for at least a day,
+ // so 40-minute PRs would never trigger bot updates
+ extRemodelAutomatedUpdatesCost := 0.0 // 40 minutes is too short for automated updates
+
+ // Recalculate PR tracking cost
+ // With faster merge times, we'd have fewer open PRs at any given time
+ // Estimate: if current avg is X hours, and we reduce to 40 min, open PRs would be (40min / X hours) of current
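+ // e.g. with a 20-hour current average, the ratio is 0.667/20 ≈ 0.033, leaving ~3% of the tracking cost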
+ var extRemodelPRTrackingCost float64
+ var currentAvgOpenTime float64
+ if samples > 0 {
+ currentAvgOpenTime = sumPRDuration / samples
+ }
+ if currentAvgOpenTime > 0 {
+ openPRReductionRatio := targetMergeTimeHours / currentAvgOpenTime
+ extRemodelPRTrackingCost = extPRTrackingCost * openPRReductionRatio
+ } else {
+ extRemodelPRTrackingCost = 0.0
+ }
+
+ // Calculate re-modeled annual waste
+ remodelPreventablePerPeriod := extRemodelDeliveryDelayCost + extRemodelCodeChurnCost + extRemodelAutomatedUpdatesCost + extRemodelPRTrackingCost
+ remodelAnnualWaste := remodelPreventablePerPeriod * (52.0 / (float64(daysInPeriod) / 7.0))
+
+ // Subtract R2R subscription cost: $4/mo * 12 months * unique user count
+ r2rAnnualCost := 4.0 * 12.0 * float64(uniqueUserCount)
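+ // e.g. 12 unique users: 12 × $48 = $576/yr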
+
+ // Calculate savings
+ r2rSavings := baselineAnnualWaste - remodelAnnualWaste - r2rAnnualCost
+ if r2rSavings < 0 {
+ r2rSavings = 0 // Don't show negative savings
+ }
+
return ExtrapolatedBreakdown{
TotalPRs: totalPRs,
HumanPRs: extHumanPRs,
@@ -390,5 +459,8 @@ func ExtrapolateFromSamples(breakdowns []Breakdown, totalPRs, totalAuthors, actu
TotalCost: extTotalCost,
TotalHours: extTotalHours,
+
+ UniqueNonBotUsers: uniqueUserCount,
+ R2RSavings: r2rSavings,
}
}