Skip to content

Commit 769ea8d

Browse files
committed
Migrate to slog library
1 parent 920467c commit 769ea8d

File tree

10 files changed

+220
-194
lines changed

10 files changed

+220
-194
lines changed

cmd/goose/browser_rate_limiter.go

Lines changed: 14 additions & 13 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
package main
33

44
import (
5-
"log"
5+
"log/slog"
66
"sync"
77
"time"
88
)
@@ -20,8 +20,8 @@ type BrowserRateLimiter struct {
2020

2121
// NewBrowserRateLimiter creates a new browser rate limiter.
2222
func NewBrowserRateLimiter(startupDelay time.Duration, maxPerMinute, maxPerDay int) *BrowserRateLimiter {
23-
log.Printf("[BROWSER] Initializing rate limiter: startup_delay=%v, max_per_minute=%d, max_per_day=%d",
24-
startupDelay, maxPerMinute, maxPerDay)
23+
slog.Info("[BROWSER] Initializing rate limiter",
24+
"startup_delay", startupDelay, "max_per_minute", maxPerMinute, "max_per_day", maxPerDay)
2525
return &BrowserRateLimiter{
2626
openedLastMinute: make([]time.Time, 0),
2727
openedToday: make([]time.Time, 0),
@@ -39,14 +39,14 @@ func (b *BrowserRateLimiter) CanOpen(startTime time.Time, prURL string) bool {
3939

4040
// Check if we've already opened this PR
4141
if b.openedPRs[prURL] {
42-
log.Printf("[BROWSER] Skipping auto-open: PR already opened - %s", prURL)
42+
slog.Debug("[BROWSER] Skipping auto-open: PR already opened", "url", prURL)
4343
return false
4444
}
4545

4646
// Check startup delay
4747
if time.Since(startTime) < b.startupDelay {
48-
log.Printf("[BROWSER] Skipping auto-open: within startup delay period (%v remaining)",
49-
b.startupDelay-time.Since(startTime))
48+
slog.Debug("[BROWSER] Skipping auto-open: within startup delay period",
49+
"remaining", b.startupDelay-time.Since(startTime))
5050
return false
5151
}
5252

@@ -57,15 +57,15 @@ func (b *BrowserRateLimiter) CanOpen(startTime time.Time, prURL string) bool {
5757

5858
// Check per-minute limit
5959
if len(b.openedLastMinute) >= b.maxPerMinute {
60-
log.Printf("[BROWSER] Rate limit: already opened %d PRs in the last minute (max: %d)",
61-
len(b.openedLastMinute), b.maxPerMinute)
60+
slog.Debug("[BROWSER] Rate limit: per-minute limit reached",
61+
"opened", len(b.openedLastMinute), "max", b.maxPerMinute)
6262
return false
6363
}
6464

6565
// Check per-day limit
6666
if len(b.openedToday) >= b.maxPerDay {
67-
log.Printf("[BROWSER] Rate limit: already opened %d PRs today (max: %d)",
68-
len(b.openedToday), b.maxPerDay)
67+
slog.Debug("[BROWSER] Rate limit: daily limit reached",
68+
"opened", len(b.openedToday), "max", b.maxPerDay)
6969
return false
7070
}
7171

@@ -82,8 +82,9 @@ func (b *BrowserRateLimiter) RecordOpen(prURL string) {
8282
b.openedToday = append(b.openedToday, now)
8383
b.openedPRs[prURL] = true
8484

85-
log.Printf("[BROWSER] Recorded browser open for %s (minute: %d/%d, today: %d/%d)",
86-
prURL, len(b.openedLastMinute), b.maxPerMinute, len(b.openedToday), b.maxPerDay)
85+
slog.Info("[BROWSER] Recorded browser open",
86+
"url", prURL, "minuteCount", len(b.openedLastMinute), "minuteMax", b.maxPerMinute,
87+
"todayCount", len(b.openedToday), "todayMax", b.maxPerDay)
8788
}
8889

8990
// cleanOldEntries removes entries outside the time windows.
@@ -115,5 +116,5 @@ func (b *BrowserRateLimiter) Reset() {
115116
defer b.mu.Unlock()
116117
previousCount := len(b.openedPRs)
117118
b.openedPRs = make(map[string]bool)
118-
log.Printf("[BROWSER] Rate limiter reset: cleared %d tracked PRs", previousCount)
119+
slog.Info("[BROWSER] Rate limiter reset", "clearedPRs", previousCount)
119120
}

cmd/goose/cache.go

Lines changed: 15 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ import (
77
"encoding/hex"
88
"encoding/json"
99
"fmt"
10-
"log"
10+
"log/slog"
1111
"os"
1212
"path/filepath"
1313
"strings"
@@ -41,10 +41,10 @@ func (app *App) turnData(ctx context.Context, url string, updatedAt time.Time) (
4141
if data, readErr := os.ReadFile(cacheFile); readErr == nil {
4242
var entry cacheEntry
4343
if unmarshalErr := json.Unmarshal(data, &entry); unmarshalErr != nil {
44-
log.Printf("Failed to unmarshal cache data for %s: %v", url, unmarshalErr)
44+
slog.Warn("Failed to unmarshal cache data", "url", url, "error", unmarshalErr)
4545
// Remove corrupted cache file
4646
if removeErr := os.Remove(cacheFile); removeErr != nil {
47-
log.Printf("Failed to remove corrupted cache file: %v", removeErr)
47+
slog.Error("Failed to remove corrupted cache file", "error", removeErr)
4848
}
4949
} else if time.Since(entry.CachedAt) < cacheTTL && entry.UpdatedAt.Equal(updatedAt) {
5050
// Check if cache is still valid (2 hour TTL)
@@ -55,9 +55,9 @@ func (app *App) turnData(ctx context.Context, url string, updatedAt time.Time) (
5555

5656
// Cache miss, fetch from API
5757
if app.noCache {
58-
log.Printf("Cache bypassed for %s (--no-cache), fetching from Turn API", url)
58+
slog.Debug("Cache bypassed (--no-cache), fetching from Turn API", "url", url)
5959
} else {
60-
log.Printf("Cache miss for %s, fetching from Turn API", url)
60+
slog.Debug("Cache miss, fetching from Turn API", "url", url)
6161
}
6262

6363
// Use exponential backoff with jitter for Turn API calls
@@ -70,7 +70,7 @@ func (app *App) turnData(ctx context.Context, url string, updatedAt time.Time) (
7070
var retryErr error
7171
data, retryErr = app.turnClient.Check(turnCtx, url, app.currentUser.GetLogin(), updatedAt)
7272
if retryErr != nil {
73-
log.Printf("Turn API error (will retry): %v", retryErr)
73+
slog.Warn("Turn API error (will retry)", "error", retryErr)
7474
return retryErr
7575
}
7676
return nil
@@ -79,12 +79,12 @@ func (app *App) turnData(ctx context.Context, url string, updatedAt time.Time) (
7979
retry.DelayType(retry.CombineDelay(retry.BackOffDelay, retry.RandomDelay)), // Add jitter for better backoff distribution
8080
retry.MaxDelay(maxRetryDelay),
8181
retry.OnRetry(func(n uint, err error) {
82-
log.Printf("[TURN] API retry %d/%d for %s: %v", n+1, maxRetries, url, err)
82+
slog.Warn("[TURN] API retry", "attempt", n+1, "maxRetries", maxRetries, "url", url, "error", err)
8383
}),
8484
retry.Context(ctx),
8585
)
8686
if err != nil {
87-
log.Printf("Turn API error after %d retries (will use PR without metadata): %v", maxRetries, err)
87+
slog.Error("Turn API error after retries (will use PR without metadata)", "maxRetries", maxRetries, "error", err)
8888
return nil, false, err
8989
}
9090

@@ -96,13 +96,13 @@ func (app *App) turnData(ctx context.Context, url string, updatedAt time.Time) (
9696
UpdatedAt: updatedAt,
9797
}
9898
if cacheData, marshalErr := json.Marshal(entry); marshalErr != nil {
99-
log.Printf("Failed to marshal cache data for %s: %v", url, marshalErr)
99+
slog.Error("Failed to marshal cache data", "url", url, "error", marshalErr)
100100
} else {
101101
// Ensure cache directory exists with secure permissions
102102
if dirErr := os.MkdirAll(filepath.Dir(cacheFile), 0o700); dirErr != nil {
103-
log.Printf("Failed to create cache directory: %v", dirErr)
103+
slog.Error("Failed to create cache directory", "error", dirErr)
104104
} else if writeErr := os.WriteFile(cacheFile, cacheData, 0o600); writeErr != nil {
105-
log.Printf("Failed to write cache file: %v", writeErr)
105+
slog.Error("Failed to write cache file", "error", writeErr)
106106
}
107107
}
108108
}
@@ -114,7 +114,7 @@ func (app *App) turnData(ctx context.Context, url string, updatedAt time.Time) (
114114
func (app *App) cleanupOldCache() {
115115
entries, err := os.ReadDir(app.cacheDir)
116116
if err != nil {
117-
log.Printf("Failed to read cache directory for cleanup: %v", err)
117+
slog.Error("Failed to read cache directory for cleanup", "error", err)
118118
return
119119
}
120120

@@ -126,7 +126,7 @@ func (app *App) cleanupOldCache() {
126126

127127
info, err := entry.Info()
128128
if err != nil {
129-
log.Printf("Failed to get file info for cache entry %s: %v", entry.Name(), err)
129+
slog.Error("Failed to get file info for cache entry", "entry", entry.Name(), "error", err)
130130
errorCount++
131131
continue
132132
}
@@ -135,7 +135,7 @@ func (app *App) cleanupOldCache() {
135135
if time.Since(info.ModTime()) > cacheCleanupInterval {
136136
filePath := filepath.Join(app.cacheDir, entry.Name())
137137
if removeErr := os.Remove(filePath); removeErr != nil {
138-
log.Printf("Failed to remove old cache file %s: %v", filePath, removeErr)
138+
slog.Error("Failed to remove old cache file", "file", filePath, "error", removeErr)
139139
errorCount++
140140
} else {
141141
cleanupCount++
@@ -144,6 +144,6 @@ func (app *App) cleanupOldCache() {
144144
}
145145

146146
if cleanupCount > 0 || errorCount > 0 {
147-
log.Printf("Cache cleanup completed: %d files removed, %d errors", cleanupCount, errorCount)
147+
slog.Info("Cache cleanup completed", "removed", cleanupCount, "errors", errorCount)
148148
}
149149
}

cmd/goose/github.go

Lines changed: 28 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@ import (
55
"context"
66
"errors"
77
"fmt"
8-
"log"
8+
"log/slog"
99
"os"
1010
"os/exec"
1111
"path/filepath"
@@ -65,21 +65,21 @@ func (*App) token(ctx context.Context) (string, error) {
6565
if !githubTokenRegex.MatchString(token) {
6666
return "", errors.New("GITHUB_TOKEN has invalid format")
6767
}
68-
log.Println("Using GitHub token from GITHUB_TOKEN environment variable")
68+
slog.Info("Using GitHub token from GITHUB_TOKEN environment variable")
6969
return token, nil
7070
}
7171
// Try to find gh in PATH first
7272
ghPath, err := exec.LookPath("gh")
7373
if err == nil {
74-
log.Printf("Found gh in PATH at: %s", ghPath)
74+
slog.Debug("Found gh in PATH", "path", ghPath)
7575
// Resolve any symlinks to get the real path
7676
if realPath, err := filepath.EvalSymlinks(ghPath); err == nil {
7777
ghPath = realPath
78-
log.Printf("Resolved to: %s", ghPath)
78+
slog.Debug("Resolved gh path", "path", ghPath)
7979
}
8080
} else {
8181
// Fall back to checking common installation paths
82-
log.Print("gh not found in PATH, checking common locations...")
82+
slog.Debug("gh not found in PATH, checking common locations...")
8383
var commonPaths []string
8484
switch runtime.GOOS {
8585
case "windows":
@@ -126,7 +126,7 @@ func (*App) token(ctx context.Context) (string, error) {
126126
continue // Skip empty paths from unset env vars
127127
}
128128
if _, err := os.Stat(path); err == nil {
129-
log.Printf("Found gh at common location: %s", path)
129+
slog.Debug("Found gh at common location", "path", path)
130130
ghPath = path
131131
break
132132
}
@@ -137,7 +137,7 @@ func (*App) token(ctx context.Context) (string, error) {
137137
return "", errors.New("gh CLI not found in PATH or common locations, and GITHUB_TOKEN not set")
138138
}
139139

140-
log.Printf("Executing command: %s auth token", ghPath)
140+
slog.Debug("Executing gh command", "command", ghPath+" auth token")
141141

142142
// Use retry logic for gh CLI command as it may fail temporarily
143143
var token string
@@ -149,7 +149,7 @@ func (*App) token(ctx context.Context) (string, error) {
149149
cmd := exec.CommandContext(cmdCtx, ghPath, "auth", "token")
150150
output, cmdErr := cmd.CombinedOutput()
151151
if cmdErr != nil {
152-
log.Printf("gh command failed (will retry): %v", cmdErr)
152+
slog.Warn("gh command failed (will retry)", "error", cmdErr)
153153
return fmt.Errorf("exec 'gh auth token': %w", cmdErr)
154154
}
155155

@@ -163,15 +163,15 @@ func (*App) token(ctx context.Context) (string, error) {
163163
retry.Attempts(3), // Fewer attempts for local command
164164
retry.Delay(time.Second),
165165
retry.OnRetry(func(n uint, err error) {
166-
log.Printf("[GH CLI] Retry %d/3: %v", n+1, err)
166+
slog.Warn("[GH CLI] Retry attempt", "attempt", n+1, "maxAttempts", 3, "error", err)
167167
}),
168168
retry.Context(ctx),
169169
)
170170
if retryErr != nil {
171171
return "", retryErr
172172
}
173173

174-
log.Println("Successfully obtained GitHub token from gh CLI")
174+
slog.Info("Successfully obtained GitHub token from gh CLI")
175175
return token, nil
176176
}
177177

@@ -199,23 +199,23 @@ func (app *App) executeGitHubQuery(ctx context.Context, query string, opts *gith
199199
case httpStatusForbidden:
200200
if resp.Header.Get("X-Ratelimit-Remaining") == "0" {
201201
resetTime := resp.Header.Get("X-Ratelimit-Reset")
202-
log.Printf("GitHub API rate limited, reset at: %s (will retry)", resetTime)
202+
slog.Warn("GitHub API rate limited (will retry)", "resetTime", resetTime)
203203
return retryErr // Retry on rate limit
204204
}
205-
log.Print("GitHub API access forbidden (check token permissions)")
205+
slog.Error("GitHub API access forbidden (check token permissions)")
206206
return retry.Unrecoverable(fmt.Errorf("github API access forbidden: %w", retryErr))
207207
case httpStatusUnauthorized:
208-
log.Print("GitHub API authentication failed (check token)")
208+
slog.Error("GitHub API authentication failed (check token)")
209209
return retry.Unrecoverable(fmt.Errorf("github API authentication failed: %w", retryErr))
210210
case httpStatusUnprocessable:
211-
log.Printf("GitHub API query invalid: %s", query)
211+
slog.Error("GitHub API query invalid", "query", query)
212212
return retry.Unrecoverable(fmt.Errorf("github API query invalid: %w", retryErr))
213213
default:
214-
log.Printf("GitHub API error (status %d): %v (will retry)", resp.StatusCode, retryErr)
214+
slog.Warn("GitHub API error (will retry)", "statusCode", resp.StatusCode, "error", retryErr)
215215
}
216216
} else {
217217
// Likely network error - retry these
218-
log.Printf("GitHub API network error: %v (will retry)", retryErr)
218+
slog.Warn("GitHub API network error (will retry)", "error", retryErr)
219219
}
220220
return retryErr
221221
}
@@ -225,7 +225,7 @@ func (app *App) executeGitHubQuery(ctx context.Context, query string, opts *gith
225225
retry.DelayType(retry.CombineDelay(retry.BackOffDelay, retry.RandomDelay)), // Add jitter for better backoff distribution
226226
retry.MaxDelay(maxRetryDelay),
227227
retry.OnRetry(func(n uint, err error) {
228-
log.Printf("[GITHUB] Search.Issues retry %d/%d: %v", n+1, maxRetries, err)
228+
slog.Warn("[GITHUB] Search.Issues retry", "attempt", n+1, "maxRetries", maxRetries, "error", err)
229229
}),
230230
retry.Context(ctx),
231231
)
@@ -289,7 +289,7 @@ func (app *App) fetchPRsInternal(ctx context.Context) (incoming []PR, outgoing [
289289
// Query 1: PRs involving the user
290290
go func() {
291291
query := fmt.Sprintf("is:open is:pr involves:%s archived:false", user)
292-
log.Printf("[GITHUB] Searching for PRs with query: %s", query)
292+
slog.Debug("[GITHUB] Searching for PRs", "query", query)
293293

294294
result, err := app.executeGitHubQuery(ctx, query, opts)
295295
if err != nil {
@@ -302,7 +302,7 @@ func (app *App) fetchPRsInternal(ctx context.Context) (incoming []PR, outgoing [
302302
// Query 2: PRs in user-owned repos with no reviewers
303303
go func() {
304304
query := fmt.Sprintf("is:open is:pr user:%s review:none archived:false", user)
305-
log.Printf("[GITHUB] Searching for PRs with query: %s", query)
305+
slog.Debug("[GITHUB] Searching for PRs", "query", query)
306306

307307
result, err := app.executeGitHubQuery(ctx, query, opts)
308308
if err != nil {
@@ -320,12 +320,12 @@ func (app *App) fetchPRsInternal(ctx context.Context) (incoming []PR, outgoing [
320320
for range 2 {
321321
result := <-queryResults
322322
if result.err != nil {
323-
log.Printf("[GITHUB] Query failed: %s - %v", result.query, result.err)
323+
slog.Error("[GITHUB] Query failed", "query", result.query, "error", result.err)
324324
queryErrors = append(queryErrors, result.err)
325325
// Continue processing other query results even if one fails
326326
continue
327327
}
328-
log.Printf("[GITHUB] Query completed: %s - found %d PRs", result.query, len(result.issues))
328+
slog.Debug("[GITHUB] Query completed", "query", result.query, "prCount", len(result.issues))
329329

330330
// Deduplicate PRs based on URL
331331
for _, issue := range result.issues {
@@ -336,7 +336,7 @@ func (app *App) fetchPRsInternal(ctx context.Context) (incoming []PR, outgoing [
336336
}
337337
}
338338
}
339-
log.Printf("[GITHUB] Both searches completed in %v, found %d unique PRs", time.Since(searchStart), len(allIssues))
339+
slog.Info("[GITHUB] Both searches completed", "duration", time.Since(searchStart), "uniquePRs", len(allIssues))
340340

341341
// If both queries failed, return an error
342342
if len(queryErrors) == 2 {
@@ -345,7 +345,7 @@ func (app *App) fetchPRsInternal(ctx context.Context) (incoming []PR, outgoing [
345345

346346
// Limit PRs for performance
347347
if len(allIssues) > maxPRsToProcess {
348-
log.Printf("Limiting to %d PRs for performance (total: %d)", maxPRsToProcess, len(allIssues))
348+
slog.Info("Limiting PRs for performance", "limit", maxPRsToProcess, "total", len(allIssues))
349349
allIssues = allIssues[:maxPRsToProcess]
350350
}
351351

@@ -361,7 +361,7 @@ func (app *App) fetchPRsInternal(ctx context.Context) (incoming []PR, outgoing [
361361
if org != "" {
362362
app.mu.Lock()
363363
if !app.seenOrgs[org] {
364-
log.Printf("[ORG] Discovered new organization: %s", org)
364+
slog.Info("[ORG] Discovered new organization", "org", org)
365365
}
366366
app.seenOrgs[org] = true
367367
app.mu.Unlock()
@@ -385,7 +385,7 @@ func (app *App) fetchPRsInternal(ctx context.Context) (incoming []PR, outgoing [
385385
}
386386

387387
// Only log summary, not individual PRs
388-
log.Printf("[GITHUB] Found %d incoming, %d outgoing PRs from GitHub", len(incoming), len(outgoing))
388+
slog.Info("[GITHUB] GitHub PR summary", "incoming", len(incoming), "outgoing", len(outgoing))
389389

390390
// Fetch Turn API data
391391
// Always synchronous now for simplicity - Turn API calls are fast with caching
@@ -459,7 +459,7 @@ func (app *App) fetchTurnDataSync(ctx context.Context, issues []*github.Issue, u
459459
actionReason = action.Reason
460460
// Only log fresh API calls
461461
if !result.wasFromCache {
462-
log.Printf("[TURN] UnblockAction for %s: Reason=%q, Kind=%q", result.url, action.Reason, action.Kind)
462+
slog.Debug("[TURN] UnblockAction", "url", result.url, "reason", action.Reason, "kind", action.Kind)
463463
}
464464
}
465465

@@ -487,6 +487,6 @@ func (app *App) fetchTurnDataSync(ctx context.Context, issues []*github.Issue, u
487487
}
488488
}
489489

490-
log.Printf("[TURN] Turn API queries completed in %v (%d/%d succeeded)",
491-
time.Since(turnStart), turnSuccesses, turnSuccesses+turnFailures)
490+
slog.Info("[TURN] Turn API queries completed",
491+
"duration", time.Since(turnStart), "succeeded", turnSuccesses, "total", turnSuccesses+turnFailures)
492492
}

0 commit comments

Comments (0)