19 changes: 16 additions & 3 deletions README.md
@@ -44,16 +44,29 @@ cd goose && make run

If you want more control over which repositories the goose can access, you can use a GitHub personal access token instead:

1. Create a [GitHub personal access token](https://github.com/settings/tokens) with access to read pull-requests and repo metadata.
2. Set the `GITHUB_TOKEN` environment variable:
For maximum security, use a [fine-grained personal access token](https://github.com/settings/personal-access-tokens/new):

1. Go to GitHub Settings → Developer settings → Personal access tokens → Fine-grained tokens
2. Create a new token with:
- **Expiration**: Set a short expiration (30-90 days recommended)
- **Repository access**: Select only the specific repositories you want to monitor
- **Permissions**:
- Pull requests: Read
- Metadata: Read
3. Copy the token (starts with `github_pat_`)

If you need broader access, you can use a [classic token](https://github.com/settings/tokens):
- Create with `repo` scope (grants full repository access - use with caution)

#### Using the Token

```bash
export GITHUB_TOKEN=your_token_here
git clone https://github.com/ready-to-review/goose.git
cd goose && make run
```

When `GITHUB_TOKEN` is set, the goose will use it directly instead of the GitHub CLI, giving you precise control over repository access.
When `GITHUB_TOKEN` is set, the goose will use it directly instead of the GitHub CLI, giving you precise control over repository access. Fine-grained tokens are strongly recommended because their access can be scoped to specific repositories and set to expire.

## Known Issues

2 changes: 1 addition & 1 deletion cache.go
@@ -64,7 +64,7 @@ func (app *App) turnData(ctx context.Context, url string, updatedAt time.Time) (
var data *turn.CheckResponse
err := retry.Do(func() error {
// Create timeout context for Turn API call
turnCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
turnCtx, cancel := context.WithTimeout(ctx, turnAPITimeout)
defer cancel()

var retryErr error
24 changes: 19 additions & 5 deletions github.go
@@ -22,7 +22,7 @@ import (

// initClients initializes GitHub and Turn API clients.
func (app *App) initClients(ctx context.Context) error {
token, err := app.githubToken(ctx)
token, err := app.token(ctx)
if err != nil {
return fmt.Errorf("get github token: %w", err)
}
@@ -44,8 +44,8 @@ func (app *App) initClients(ctx context.Context) error {
return nil
}

// githubToken retrieves the GitHub token from GITHUB_TOKEN env var or gh CLI.
func (*App) githubToken(ctx context.Context) (string, error) {
// token retrieves the GitHub token from GITHUB_TOKEN env var or gh CLI.
func (*App) token(ctx context.Context) (string, error) {
// First check for GITHUB_TOKEN environment variable
if token := os.Getenv("GITHUB_TOKEN"); token != "" {
token = strings.TrimSpace(token)
@@ -390,7 +390,10 @@ func (app *App) fetchTurnDataSync(ctx context.Context, issues []*github.Issue, u
// Use a WaitGroup to track goroutines
var wg sync.WaitGroup

// Process PRs in parallel
// Create semaphore to limit concurrent Turn API calls
sem := make(chan struct{}, maxConcurrentTurnAPICalls)

// Process PRs in parallel with concurrency limit
for _, issue := range issues {
if !issue.IsPullRequest() {
continue
@@ -400,6 +403,10 @@
go func(issue *github.Issue) {
defer wg.Done()

// Acquire semaphore
sem <- struct{}{}
defer func() { <-sem }()

url := issue.GetHTMLURL()
updatedAt := issue.GetUpdatedAt().Time

@@ -490,7 +497,10 @@ func (app *App) fetchTurnDataAsync(ctx context.Context, issues []*github.Issue,
// Use a WaitGroup to track goroutines
var wg sync.WaitGroup

// Process PRs in parallel
// Create semaphore to limit concurrent Turn API calls
sem := make(chan struct{}, maxConcurrentTurnAPICalls)

// Process PRs in parallel with concurrency limit
for _, issue := range issues {
if !issue.IsPullRequest() {
continue
@@ -500,6 +510,10 @@
go func(issue *github.Issue) {
defer wg.Done()

// Acquire semaphore
sem <- struct{}{}
defer func() { <-sem }()

url := issue.GetHTMLURL()
updatedAt := issue.GetUpdatedAt().Time

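The concurrency limit added above uses Go's standard buffered-channel semaphore idiom: a send acquires a slot, a receive releases it, and the channel's capacity (`maxConcurrentTurnAPICalls`) bounds how many Turn API calls run at once. A minimal, self-contained sketch of the same pattern; the worker body and the `maxConcurrent` constant here are placeholders, not the project's code:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

const maxConcurrent = 10 // stand-in for maxConcurrentTurnAPICalls

func main() {
	var wg sync.WaitGroup
	sem := make(chan struct{}, maxConcurrent)

	for i := 0; i < 50; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()

			// Acquire a slot; this blocks once maxConcurrent workers are in flight.
			sem <- struct{}{}
			defer func() { <-sem }()

			// Placeholder for the real work (a Turn API call in the diff above).
			time.Sleep(100 * time.Millisecond)
			fmt.Println("finished", id)
		}(i)
	}

	wg.Wait()
}
```

Because the slot is acquired inside the goroutine, all goroutines are still spawned immediately; only the number of simultaneous Turn API calls is bounded.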
74 changes: 33 additions & 41 deletions main.go
@@ -47,6 +47,19 @@ const (
// Retry settings for external API calls - exponential backoff with jitter up to 2 minutes.
maxRetryDelay = 2 * time.Minute
maxRetries = 10 // Should reach 2 minutes with exponential backoff

// Failure thresholds.
minorFailureThreshold = 3
majorFailureThreshold = 10
panicFailureIncrement = 10

// Notification settings.
reminderInterval = 24 * time.Hour
historyRetentionDays = 30

// Turn API settings.
turnAPITimeout = 10 * time.Second
maxConcurrentTurnAPICalls = 10
)

// PR represents a pull request with metadata.
@@ -270,7 +283,7 @@ func (app *App) updateLoop(ctx context.Context) {

// Update failure count
app.mu.Lock()
app.consecutiveFailures += 10 // Treat panic as critical failure
app.consecutiveFailures += panicFailureIncrement // Treat panic as critical failure
app.mu.Unlock()

// Signal app to quit after panic
@@ -308,10 +321,6 @@ func (app *App) updatePRs(ctx context.Context) {
app.mu.Unlock()

// Progressive degradation based on failure count
const (
minorFailureThreshold = 3
majorFailureThreshold = 10
)
var title, tooltip string
switch {
case failureCount == 1:
@@ -475,10 +484,6 @@ func (app *App) updatePRsWithWait(ctx context.Context) {
app.mu.Unlock()

// Progressive degradation based on failure count
const (
minorFailureThreshold = 3
majorFailureThreshold = 10
)
var title, tooltip string
switch {
case failureCount == 1:
@@ -541,35 +546,6 @@ func (app *App) updatePRsWithWait(ctx context.Context) {
app.checkForNewlyBlockedPRs(ctx)
}

// shouldNotifyForPR determines if we should send a notification for a PR.
func shouldNotifyForPR(
_ string,
isBlocked bool,
prevState NotificationState,
hasHistory bool,
reminderInterval time.Duration,
enableReminders bool,
) (shouldNotify bool, reason string) {
if !hasHistory && isBlocked {
return true, "newly blocked"
}

if !hasHistory {
return false, ""
}

switch {
case isBlocked && !prevState.WasBlocked:
return true, "became blocked"
case !isBlocked && prevState.WasBlocked:
return false, "unblocked"
case isBlocked && prevState.WasBlocked && enableReminders && time.Since(prevState.LastNotified) > reminderInterval:
return true, "reminder"
default:
return false, ""
}
}

// processPRNotifications handles notification logic for a single PR.
func (app *App) processPRNotifications(
ctx context.Context,
@@ -582,7 +558,24 @@ func (app *App) processPRNotifications(
reminderInterval time.Duration,
) {
prevState, hasHistory := notificationHistory[pr.URL]
shouldNotify, notifyReason := shouldNotifyForPR(pr.URL, isBlocked, prevState, hasHistory, reminderInterval, app.enableReminders)

// Determine if we should notify (inlined from shouldNotifyForPR)
var shouldNotify bool
var notifyReason string
switch {
case !hasHistory && isBlocked:
shouldNotify, notifyReason = true, "newly blocked"
case !hasHistory:
shouldNotify, notifyReason = false, ""
case isBlocked && !prevState.WasBlocked:
shouldNotify, notifyReason = true, "became blocked"
case !isBlocked && prevState.WasBlocked:
shouldNotify, notifyReason = false, "unblocked"
case isBlocked && prevState.WasBlocked && app.enableReminders && time.Since(prevState.LastNotified) > reminderInterval:
shouldNotify, notifyReason = true, "reminder"
default:
shouldNotify, notifyReason = false, ""
}

// Update state for unblocked PRs
if notifyReason == "unblocked" {
@@ -669,8 +662,7 @@ func (app *App) checkForNewlyBlockedPRs(ctx context.Context) {
now := time.Now()
staleThreshold := now.Add(-stalePRThreshold)

// Reminder interval for re-notifications (24 hours)
const reminderInterval = 24 * time.Hour
// Reminder interval for re-notifications now comes from the package-level reminderInterval constant

currentBlockedPRs := make(map[string]bool)
playedIncomingSound := false
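The new retry constants describe an exponential backoff with jitter that tops out at `maxRetryDelay` after at most `maxRetries` attempts. The project delegates the actual retrying to a library (`retry.Do` in cache.go); the sketch below is only a standard-library illustration of the schedule those constants imply, with a placeholder operation and a hypothetical helper name:

```go
package main

import (
	"errors"
	"fmt"
	"math/rand"
	"time"
)

// Mirrors the constants added in main.go.
const (
	maxRetryDelay = 2 * time.Minute
	maxRetries    = 10
)

// retryWithBackoff is an illustrative helper, not the project's retry path:
// it retries op with exponential backoff plus jitter, capping each delay
// at maxRetryDelay.
func retryWithBackoff(op func() error) error {
	delay := time.Second
	var err error
	for attempt := 1; attempt <= maxRetries; attempt++ {
		if err = op(); err == nil {
			return nil
		}
		if attempt == maxRetries {
			break
		}
		// Up to 50% jitter so clients that fail together do not retry in lockstep.
		sleep := delay + time.Duration(rand.Int63n(int64(delay)/2))
		if sleep > maxRetryDelay {
			sleep = maxRetryDelay
		}
		time.Sleep(sleep)
		delay *= 2
	}
	return fmt.Errorf("gave up after %d attempts: %w", maxRetries, err)
}

func main() {
	calls := 0
	err := retryWithBackoff(func() error {
		calls++
		if calls < 3 {
			return errors.New("transient failure")
		}
		return nil
	})
	fmt.Println("err:", err, "calls:", calls)
}
```

Doubling from one second reaches the two-minute cap after roughly eight failed attempts, which is consistent with the comment on `maxRetries`.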
58 changes: 58 additions & 0 deletions ratelimit.go
@@ -0,0 +1,58 @@
// Package main - ratelimit.go provides rate limiting functionality.
package main

import (
"sync"
"time"
)

// RateLimiter implements a simple token bucket rate limiter.
type RateLimiter struct {
lastRefill time.Time
mu sync.Mutex
refillRate time.Duration
tokens int
maxTokens int
}

// NewRateLimiter creates a new rate limiter.
func NewRateLimiter(maxTokens int, refillRate time.Duration) *RateLimiter {
return &RateLimiter{
tokens: maxTokens,
maxTokens: maxTokens,
refillRate: refillRate,
lastRefill: time.Now(),
}
}

// Allow checks if an operation is allowed under the rate limit.
func (r *RateLimiter) Allow() bool {
r.mu.Lock()
defer r.mu.Unlock()

// Refill tokens based on elapsed time
now := time.Now()
elapsed := now.Sub(r.lastRefill)
tokensToAdd := int(elapsed / r.refillRate)

if tokensToAdd > 0 {
r.tokens = minInt(r.tokens+tokensToAdd, r.maxTokens)
r.lastRefill = now
}

// Check if we have tokens available
if r.tokens > 0 {
r.tokens--
return true
}

return false
}

// minInt returns the minimum of two integers.
func minInt(a, b int) int {
if a < b {
return a
}
return b
}
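`Allow` refills one token per elapsed `refillRate` interval (capped at `maxTokens`) and spends a token when one is available, so callers get short bursts plus a bounded average rate. A usage sketch, assuming it sits next to ratelimit.go in the same package; the caller and its numbers are hypothetical, since the diff does not show where the limiter is wired in:

```go
package main

import (
	"log"
	"time"
)

// rateLimitedCalls is a hypothetical caller illustrating the token bucket:
// bursts of up to 5 calls, then roughly one call per 200ms on average.
func rateLimitedCalls(work func()) {
	limiter := NewRateLimiter(5, 200*time.Millisecond)

	for i := 0; i < 20; i++ {
		if !limiter.Allow() {
			// Out of tokens: wait for a refill instead of hammering the API.
			time.Sleep(50 * time.Millisecond)
			continue
		}
		work() // placeholder for the rate-limited operation, e.g. a GitHub request
	}
	log.Println("rate-limited calls complete")
}
```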