12 changes: 11 additions & 1 deletion cmd/server/main.go
@@ -9,6 +9,7 @@ import (
 	"net/http"
 	"os"
 	"os/signal"
+	"path/filepath"
 	"runtime"
 	"syscall"
 	"time"
@@ -36,10 +37,19 @@ func main() {
 	// Create root context
 	ctx := context.Background()
 
-	// Set up logging
+	// Set up logging with short source paths
 	logger := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{
 		AddSource: true,
 		Level:     slog.LevelInfo,
+		ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr {
+			// Shorten source file paths to show only filename:line
+			if a.Key == slog.SourceKey {
+				if src, ok := a.Value.Any().(*slog.Source); ok {
+					src.File = filepath.Base(src.File)
+				}
+			}
+			return a
+		},
 	}))
 	slog.SetDefault(logger)
 
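For context on the ReplaceAttr hook above: with AddSource enabled, slog's TextHandler renders the caller as source=FILE:LINE using the full on-disk path. Trimming with filepath.Base turns a record like (paths and values illustrative)

    time=2025-01-01T00:00:00Z level=INFO source=/workspace/cmd/server/main.go:42 msg="starting server"

into

    time=2025-01-01T00:00:00Z level=INFO source=main.go:42 msg="starting server"

which keeps log lines short without losing the filename:line reference.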
37 changes: 5 additions & 32 deletions internal/server/server.go
@@ -196,8 +196,11 @@ func New() *Server {
 		logger.InfoContext(ctx, "No fallback token available - requests must provide Authorization header")
 	}
 
-	// Start cache cleanup goroutine.
-	go server.cleanupCachesPeriodically()
+	// Note: We don't clear caches periodically because:
+	// - PR data is immutable (closed PRs don't change)
+	// - Memory usage is bounded by request patterns
+	// - Cloud Run instances are ephemeral and restart frequently anyway
+	// If needed in the future, implement LRU eviction with size limits instead of time-based clearing
 
 	return server
 }
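The comment above names LRU eviction as the fallback if memory ever does become a concern. A minimal sketch of what that could look like, assuming the server's existing cache mutexes handle synchronization around it (the package, type, and size limit here are hypothetical, not part of this PR):

package cache

import "container/list"

// entry pairs a key with its value so eviction can find the map key again.
type entry struct {
	key   string
	value any
}

// LRU is a size-bounded cache: reads refresh recency, and writes past max
// evict the least recently used entry. Not goroutine-safe on its own; the
// server's existing cache mutexes would wrap these calls.
type LRU struct {
	max   int
	order *list.List // front = most recently used
	items map[string]*list.Element
}

func NewLRU(max int) *LRU {
	return &LRU{max: max, order: list.New(), items: make(map[string]*list.Element)}
}

func (c *LRU) Get(key string) (any, bool) {
	el, ok := c.items[key]
	if !ok {
		return nil, false
	}
	c.order.MoveToFront(el)
	return el.Value.(*entry).value, true
}

func (c *LRU) Put(key string, value any) {
	if el, ok := c.items[key]; ok {
		el.Value.(*entry).value = value
		c.order.MoveToFront(el)
		return
	}
	c.items[key] = c.order.PushFront(&entry{key: key, value: value})
	if c.order.Len() > c.max {
		oldest := c.order.Back()
		c.order.Remove(oldest)
		delete(c.items, oldest.Value.(*entry).key)
	}
}

Put evicts from the back of the recency list once max is exceeded, so memory stays bounded without any ticker goroutine.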
@@ -304,36 +307,6 @@ func (s *Server) limiter(ctx context.Context, ip string) *rate.Limiter {
 	return limiter
 }
 
-// cleanupCachesPeriodically clears all caches every 30 minutes to prevent unbounded growth.
-// Cloud Run instances are ephemeral, so no complex TTL logic is needed.
-func (s *Server) cleanupCachesPeriodically() {
-	ticker := time.NewTicker(30 * time.Minute)
-	defer ticker.Stop()
-
-	for range ticker.C {
-		s.clearCache(&s.prQueryCacheMu, s.prQueryCache, "pr_query")
-		s.clearCache(&s.prDataCacheMu, s.prDataCache, "pr_data")
-	}
-}
-
-// clearCache removes all entries from a cache.
-func (s *Server) clearCache(mu *sync.RWMutex, cache map[string]*cacheEntry, name string) {
-	mu.Lock()
-	defer mu.Unlock()
-
-	count := len(cache)
-	// Clear map by creating new map
-	for key := range cache {
-		delete(cache, key)
-	}
-
-	if count > 0 {
-		s.logger.Info("Cleared cache",
-			"cache", name,
-			"cleared", count)
-	}
-}
-
 // cachedPRQuery retrieves cached PR query results.
 func (s *Server) cachedPRQuery(key string) ([]github.PRSummary, bool) {
 	s.prQueryCacheMu.RLock()
20 changes: 16 additions & 4 deletions internal/server/static/index.html
@@ -1412,15 +1412,26 @@ <h3>Why calculate PR costs?</h3>
   return html;
 }
 
-function formatR2RCallout(avgOpenHours) {
+function formatR2RCallout(avgOpenHours, r2rSavings) {
   // Only show if average merge velocity is > 1 hour
   if (avgOpenHours <= 1) {
     return '';
   }
 
+  // Format savings with appropriate precision
+  let savingsText;
+  if (r2rSavings >= 1000000) {
+    savingsText = '$' + (r2rSavings / 1000000).toFixed(1) + 'M';
+  } else if (r2rSavings >= 1000) {
+    savingsText = '$' + (r2rSavings / 1000).toFixed(0) + 'K';
+  } else {
+    savingsText = '$' + r2rSavings.toFixed(0);
+  }
+
   let html = '<div style="margin: 24px 0; padding: 12px 20px; background: linear-gradient(135deg, #e6f9f0 0%, #ffffff 100%); border-left: 3px solid #00c853; border-radius: 8px; font-size: 14px; color: #1d1d1f; line-height: 1.6;">';
-  html += '✓ <a href="https://codegroove.dev/" target="_blank" rel="noopener" style="color: #00c853; font-weight: 600; text-decoration: none;">Ready-to-Review</a>: <strong>$4/mo</strong> to cut merge latency to <strong>≤40 min</strong><br>';
-  html += 'Stop losing engineering hours to code review lag. Free for OSS projects. Let\'s chat: <a href="mailto:go-faster@codeGROOVE.dev" style="color: #00c853; text-decoration: none;">go-faster@codeGROOVE.dev</a>';
+  html += '✓ Based on this calculation, <a href="https://codegroove.dev/" target="_blank" rel="noopener" style="color: #00c853; font-weight: 600; text-decoration: none;">Ready-to-Review</a> would save you <strong>~' + savingsText + '/yr</strong> by cutting average merge time to ≤40 min. ';
+  html += 'Stop losing engineering hours to code review lag. Free for OSS projects. ';
+  html += 'Let\'s chat: <a href="mailto:go-faster@codeGROOVE.dev" style="color: #00c853; text-decoration: none;">go-faster@codeGROOVE.dev</a>';
   html += '</div>';
   return html;
 }
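For reference, the precision tiers above render r2rSavings = 1,234,567 as $1.2M, 42,000 as $42K, and 500 as $500 (values illustrative).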
@@ -2025,7 +2036,8 @@ <h3>Why calculate PR costs?</h3>
 
 // Add R2R callout if enabled and merge velocity > 1 hour
 if (data.r2r_callout) {
-  html += formatR2RCallout(avgPRDurationHours);
+  const r2rSavings = e.r2r_savings || 0;
+  html += formatR2RCallout(avgPRDurationHours, r2rSavings);
 }
 
 // Calculate average PR efficiency
72 changes: 72 additions & 0 deletions pkg/cost/extrapolate.go
@@ -82,6 +82,10 @@ type ExtrapolatedBreakdown struct {
 	// Grand totals
 	TotalCost  float64 `json:"total_cost"`
 	TotalHours float64 `json:"total_hours"`
+
+	// R2R cost savings calculation
+	UniqueNonBotUsers int     `json:"unique_non_bot_users"` // Count of unique non-bot users (authors + participants)
+	R2RSavings        float64 `json:"r2r_savings"`          // Annual savings if R2R cuts PR time to 40 minutes
 }
 
 // ExtrapolateFromSamples calculates extrapolated cost estimates from a sample
@@ -113,6 +117,8 @@ func ExtrapolateFromSamples(breakdowns []Breakdown, totalPRs, totalAuthors, actu
 
 	// Track unique PR authors (excluding bots)
 	uniqueAuthors := make(map[string]bool)
+	// Track unique non-bot users (authors + participants)
+	uniqueNonBotUsers := make(map[string]bool)
 
 	// Track bot vs human PR metrics
 	var humanPRCount, botPRCount int
@@ -140,6 +146,7 @@
 		// Track unique PR authors only (excluding bots)
 		if !breakdown.AuthorBot {
 			uniqueAuthors[breakdown.PRAuthor] = true
+			uniqueNonBotUsers[breakdown.PRAuthor] = true
 			humanPRCount++
 			sumHumanPRDuration += breakdown.PRDuration
 		} else {
@@ -150,6 +157,12 @@
 			sumBotModifiedLines += breakdown.Author.ModifiedLines
 		}
 
+		// Track unique participants (excluding bots)
+		for _, p := range breakdown.Participants {
+			// Participants from the Breakdown struct are already filtered to exclude bots
+			uniqueNonBotUsers[p.Actor] = true
+		}
+
 		// Accumulate PR duration (all PRs)
 		sumPRDuration += breakdown.PRDuration
 
@@ -322,6 +335,62 @@ func ExtrapolateFromSamples(breakdowns []Breakdown, totalPRs, totalAuthors, actu
 	extHumanPRs := int(float64(humanPRCount) / samples * multiplier)
 	extBotPRs := int(float64(botPRCount) / samples * multiplier)
 
+	// Calculate R2R savings
+	// Formula: baseline annual waste - (re-modeled waste with 40min PRs) - (R2R subscription cost)
+	// Baseline annual waste: preventable cost extrapolated to 52 weeks
+	uniqueUserCount := len(uniqueNonBotUsers)
+	baselineAnnualWaste := (extCodeChurnCost + extDeliveryDelayCost + extAutomatedUpdatesCost + extPRTrackingCost) * (52.0 / (float64(daysInPeriod) / 7.0))
+
+	// Re-model with 40-minute PR merge times
+	// We need to recalculate delivery delay and future costs assuming all PRs take 40 minutes (2/3 hour)
+	const targetMergeTimeHours = 40.0 / 60.0 // 40 minutes in hours
+
+	// Recalculate delivery delay cost with 40-minute PRs
+	// Delivery delay formula: hourlyRate × deliveryDelayFactor × PR duration
+	var remodelDeliveryDelayCost float64
+	for range breakdowns {
+		remodelDeliveryDelayCost += hourlyRate * cfg.DeliveryDelayFactor * targetMergeTimeHours
+	}
+	extRemodelDeliveryDelayCost := remodelDeliveryDelayCost / samples * multiplier
+
+	// Recalculate code churn with 40-minute PRs
+	// Code churn is proportional to PR duration (rework percentage increases with time)
+	// For 40 minutes, rework percentage would be minimal (< 1 day, so ~0%)
+	extRemodelCodeChurnCost := 0.0 // 40 minutes is too short for meaningful code churn
+
+	// Recalculate automated updates cost
+	// Automated updates are calculated based on PR duration
+	// With 40-minute PRs, no bot updates would be needed (they only kick in after a PR has been open for 1 day)
+	extRemodelAutomatedUpdatesCost := 0.0 // 40 minutes is too short for automated updates
+
+	// Recalculate PR tracking cost
+	// With faster merge times, we'd have fewer open PRs at any given time
+	// Estimate: if current avg is X hours, and we reduce to 40 min, open PRs would be (40min / X hours) of current
+	var extRemodelPRTrackingCost float64
+	var currentAvgOpenTime float64
+	if successfulSamples > 0 {
+		currentAvgOpenTime = sumPRDuration / samples
+	}
+	if currentAvgOpenTime > 0 {
+		openPRReductionRatio := targetMergeTimeHours / currentAvgOpenTime
+		extRemodelPRTrackingCost = extPRTrackingCost * openPRReductionRatio
+	} else {
+		extRemodelPRTrackingCost = 0.0
+	}
+
+	// Calculate re-modeled annual waste
+	remodelPreventablePerPeriod := extRemodelDeliveryDelayCost + extRemodelCodeChurnCost + extRemodelAutomatedUpdatesCost + extRemodelPRTrackingCost
+	remodelAnnualWaste := remodelPreventablePerPeriod * (52.0 / (float64(daysInPeriod) / 7.0))
+
+	// Subtract R2R subscription cost: $4/mo * 12 months * unique user count
+	r2rAnnualCost := 4.0 * 12.0 * float64(uniqueUserCount)
+
+	// Calculate savings
+	r2rSavings := baselineAnnualWaste - remodelAnnualWaste - r2rAnnualCost
+	if r2rSavings < 0 {
+		r2rSavings = 0 // Don't show negative savings
+	}
+
 	return ExtrapolatedBreakdown{
 		TotalPRs: totalPRs,
 		HumanPRs: extHumanPRs,
@@ -390,5 +459,8 @@ func ExtrapolateFromSamples(breakdowns []Breakdown, totalPRs, totalAuthors, actu
 
 		TotalCost:  extTotalCost,
 		TotalHours: extTotalHours,
+
+		UniqueNonBotUsers: uniqueUserCount,
+		R2RSavings:        r2rSavings,
 	}
 }
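To make the savings arithmetic concrete, here is a worked example with invented figures: for daysInPeriod = 91 (13 weeks) and $130,000 of extrapolated preventable cost in the period, baselineAnnualWaste = 130,000 × (52 / 13) = $520,000. If the re-modeled 40-minute costs come to $6,500 per period, remodelAnnualWaste = $26,000, and with 25 unique non-bot users, r2rAnnualCost = 4 × 12 × 25 = $1,200, giving r2rSavings = 520,000 − 26,000 − 1,200 = $492,800 (clamped to zero if it ever went negative).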