diff --git a/.golangci.yml b/.golangci.yml
index 9d62ec6..b455b96 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -122,7 +122,7 @@ linters:
struct-method: false
gocognit:
- min-complexity: 65
+ min-complexity: 55
gocritic:
enable-all: true
@@ -143,11 +143,6 @@ linters:
govet:
enable-all: true
- maintidx:
- # Maintainability Index threshold (default: 20)
- # ExtrapolateFromSamples is a straightforward calculation with clear linear logic
- under: 16
-
godot:
scope: toplevel
@@ -182,7 +177,7 @@ linters:
- name: cyclomatic
disabled: true # prefer maintidx
- name: function-length
- arguments: [150, 300]
+ arguments: [150, 225]
- name: line-length-limit
arguments: [150]
- name: nested-structs
@@ -191,8 +186,8 @@ linters:
arguments: [10]
- name: flag-parameter # fixes are difficult
disabled: true
- - name: use-waitgroup-go
- disabled: true # wg.Add/Done pattern is idiomatic Go
+ - name: bare-return
+ disabled: true
rowserrcheck:
# database/sql is always checked.
diff --git a/Makefile b/Makefile
index e954b66..c4f6e98 100644
--- a/Makefile
+++ b/Makefile
@@ -31,8 +31,28 @@ endif
LINTERS :=
FIXERS :=
+SHELLCHECK_VERSION ?= v0.11.0
+SHELLCHECK_BIN := $(LINT_ROOT)/out/linters/shellcheck-$(SHELLCHECK_VERSION)-$(LINT_ARCH)
+$(SHELLCHECK_BIN):
+ mkdir -p $(LINT_ROOT)/out/linters
+ curl -sSfL -o $@.tar.xz https://github.com/koalaman/shellcheck/releases/download/$(SHELLCHECK_VERSION)/shellcheck-$(SHELLCHECK_VERSION).$(LINT_OS_LOWER).$(LINT_ARCH).tar.xz \
+ || echo "Unable to fetch shellcheck for $(LINT_OS)/$(LINT_ARCH): falling back to locally install"
+ test -f $@.tar.xz \
+ && tar -C $(LINT_ROOT)/out/linters -xJf $@.tar.xz \
+ && mv $(LINT_ROOT)/out/linters/shellcheck-$(SHELLCHECK_VERSION)/shellcheck $@ \
+ || printf "#!/usr/bin/env shellcheck\n" > $@
+ chmod u+x $@
+
+LINTERS += shellcheck-lint
+shellcheck-lint: $(SHELLCHECK_BIN)
+ $(SHELLCHECK_BIN) $(shell find . -name "*.sh")
+
+FIXERS += shellcheck-fix
+shellcheck-fix: $(SHELLCHECK_BIN)
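+# Pipe shellcheck's suggested fixes (unified-diff output) into git apply; the read guard exits cleanly when there is nothing to apply.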
+ $(SHELLCHECK_BIN) $(shell find . -name "*.sh") -f diff | { read -t 1 line || exit 0; { echo "$$line" && cat; } | git apply -p2; }
+
GOLANGCI_LINT_CONFIG := $(LINT_ROOT)/.golangci.yml
-GOLANGCI_LINT_VERSION ?= v2.5.0
+GOLANGCI_LINT_VERSION ?= v2.7.2
GOLANGCI_LINT_BIN := $(LINT_ROOT)/out/linters/golangci-lint-$(GOLANGCI_LINT_VERSION)-$(LINT_ARCH)
$(GOLANGCI_LINT_BIN):
mkdir -p $(LINT_ROOT)/out/linters
@@ -61,6 +81,32 @@ LINTERS += yamllint-lint
yamllint-lint: $(YAMLLINT_BIN)
PYTHONPATH=$(YAMLLINT_ROOT)/dist $(YAMLLINT_ROOT)/dist/bin/yamllint .
+BIOME_VERSION ?= 2.3.8
+BIOME_BIN := $(LINT_ROOT)/out/linters/biome-$(BIOME_VERSION)-$(LINT_ARCH)
+BIOME_CONFIG := $(LINT_ROOT)/biome.json
+
+# Map architecture names for Biome downloads
+BIOME_ARCH := $(LINT_ARCH)
+ifeq ($(LINT_ARCH),x86_64)
+ BIOME_ARCH := x64
+endif
+
+$(BIOME_BIN):
+ mkdir -p $(LINT_ROOT)/out/linters
+ rm -rf $(LINT_ROOT)/out/linters/biome-*
+ curl -sSfL -o $@ https://github.com/biomejs/biome/releases/download/%40biomejs%2Fbiome%40$(BIOME_VERSION)/biome-$(LINT_OS_LOWER)-$(BIOME_ARCH) \
+ || echo "Unable to fetch biome for $(LINT_OS_LOWER)/$(BIOME_ARCH), falling back to local install"
+ test -f $@ || printf "#!/usr/bin/env biome\n" > $@
+ chmod u+x $@
+
+LINTERS += biome-lint
+biome-lint: $(BIOME_BIN)
+ $(BIOME_BIN) check --config-path=$(BIOME_CONFIG) .
+
+FIXERS += biome-fix
+biome-fix: $(BIOME_BIN)
+ $(BIOME_BIN) check --write --config-path=$(BIOME_CONFIG) .
+
.PHONY: _lint $(LINTERS)
_lint:
@exit_code=0; \
@@ -79,6 +125,7 @@ fix:
# END: lint-install .
+
.PHONY: deploy
deploy:
./hacks/deploy.sh cmd/server/
diff --git a/cmd/prcost/main.go b/cmd/prcost/main.go
index 86d0068..9367479 100644
--- a/cmd/prcost/main.go
+++ b/cmd/prcost/main.go
@@ -590,19 +590,13 @@ func printEfficiency(breakdown *cost.Breakdown) {
fmt.Println(" ┌─────────────────────────────────────────────────────────────┐")
headerText := fmt.Sprintf("DEVELOPMENT EFFICIENCY: %s (%.1f%%) - %s", grade, efficiencyPct, message)
- padding := 60 - len(headerText)
- if padding < 0 {
- padding = 0
- }
+ padding := max(60-len(headerText), 0)
fmt.Printf(" │ %s%*s│\n", headerText, padding, "")
fmt.Println(" └─────────────────────────────────────────────────────────────┘")
fmt.Println(" ┌─────────────────────────────────────────────────────────────┐")
velocityHeader := fmt.Sprintf("MERGE VELOCITY: %s (%s) - %s", velocityGrade, formatTimeUnit(breakdown.PRDuration), velocityMessage)
- velPadding := 60 - len(velocityHeader)
- if velPadding < 0 {
- velPadding = 0
- }
+ velPadding := max(60-len(velocityHeader), 0)
fmt.Printf(" │ %s%*s│\n", velocityHeader, velPadding, "")
fmt.Println(" └─────────────────────────────────────────────────────────────┘")
diff --git a/cmd/prcost/repository.go b/cmd/prcost/repository.go
index c16e772..3274607 100644
--- a/cmd/prcost/repository.go
+++ b/cmd/prcost/repository.go
@@ -15,11 +15,14 @@ import (
// Uses library functions from pkg/github and pkg/cost for fetching, sampling,
// and extrapolation - all functionality is available to external clients.
func analyzeRepository(ctx context.Context, owner, repo string, sampleSize, days int, cfg cost.Config, token, dataSource string) error {
+ // Create GitHub client without caching (for CLI)
+ client := github.NewClientWithoutCache()
+
// Calculate since date
since := time.Now().AddDate(0, 0, -days)
// Fetch all PRs modified since the date using library function
- prs, err := github.FetchPRsFromRepo(ctx, owner, repo, since, token, nil)
+ prs, err := client.FetchPRsFromRepo(ctx, owner, repo, since, token, nil)
if err != nil {
return fmt.Errorf("failed to fetch PRs: %w", err)
}
@@ -59,14 +62,14 @@ func analyzeRepository(ctx context.Context, owner, repo string, sampleSize, days
}
// Convert samples to PRSummaryInfo format
- var summaries []cost.PRSummaryInfo
- for _, pr := range samples {
- summaries = append(summaries, cost.PRSummaryInfo{
- Owner: pr.Owner,
- Repo: pr.Repo,
- Number: pr.Number,
- UpdatedAt: pr.UpdatedAt,
- })
+ summaries := make([]cost.PRSummaryInfo, len(samples))
+ for i := range samples {
+ summaries[i] = cost.PRSummaryInfo{
+ Owner: samples[i].Owner,
+ Repo: samples[i].Repo,
+ Number: samples[i].Number,
+ UpdatedAt: samples[i].UpdatedAt,
+ }
}
// Create fetcher
@@ -93,7 +96,7 @@ func analyzeRepository(ctx context.Context, owner, repo string, sampleSize, days
totalAuthors := github.CountUniqueAuthors(prs)
// Query for actual count of open PRs (not extrapolated from samples)
- openPRCount, err := github.CountOpenPRsInRepo(ctx, owner, repo, token)
+ openPRCount, err := client.CountOpenPRsInRepo(ctx, owner, repo, token)
if err != nil {
slog.Warn("Failed to count open PRs, using 0", "error", err)
openPRCount = 0
@@ -101,17 +104,17 @@ func analyzeRepository(ctx context.Context, owner, repo string, sampleSize, days
// Convert PRSummary to PRSummaryInfo for extrapolation
prSummaryInfos := make([]cost.PRSummaryInfo, len(prs))
- for i, pr := range prs {
+ for i := range prs {
prSummaryInfos[i] = cost.PRSummaryInfo{
- Owner: pr.Owner,
- Repo: pr.Repo,
- Author: pr.Author,
- AuthorType: pr.AuthorType,
- CreatedAt: pr.CreatedAt,
- UpdatedAt: pr.UpdatedAt,
- ClosedAt: pr.ClosedAt,
- Merged: pr.Merged,
- State: pr.State,
+ Owner: prs[i].Owner,
+ Repo: prs[i].Repo,
+ Author: prs[i].Author,
+ AuthorType: prs[i].AuthorType,
+ CreatedAt: prs[i].CreatedAt,
+ UpdatedAt: prs[i].UpdatedAt,
+ ClosedAt: prs[i].ClosedAt,
+ Merged: prs[i].Merged,
+ State: prs[i].State,
}
}
@@ -128,13 +131,16 @@ func analyzeRepository(ctx context.Context, owner, repo string, sampleSize, days
// Uses library functions from pkg/github and pkg/cost for fetching, sampling,
// and extrapolation - all functionality is available to external clients.
func analyzeOrganization(ctx context.Context, org string, sampleSize, days int, cfg cost.Config, token, dataSource string) error {
+ // Create GitHub client without caching (for CLI)
+ client := github.NewClientWithoutCache()
+
slog.Info("Fetching PR list from organization")
// Calculate since date
since := time.Now().AddDate(0, 0, -days)
// Fetch all PRs across the org modified since the date using library function
- prs, err := github.FetchPRsFromOrg(ctx, org, since, token, nil)
+ prs, err := client.FetchPRsFromOrg(ctx, org, since, token, nil)
if err != nil {
return fmt.Errorf("failed to fetch PRs: %w", err)
}
@@ -174,14 +180,14 @@ func analyzeOrganization(ctx context.Context, org string, sampleSize, days int,
}
// Convert samples to PRSummaryInfo format
- var summaries []cost.PRSummaryInfo
- for _, pr := range samples {
- summaries = append(summaries, cost.PRSummaryInfo{
- Owner: pr.Owner,
- Repo: pr.Repo,
- Number: pr.Number,
- UpdatedAt: pr.UpdatedAt,
- })
+ summaries := make([]cost.PRSummaryInfo, len(samples))
+ for i := range samples {
+ summaries[i] = cost.PRSummaryInfo{
+ Owner: samples[i].Owner,
+ Repo: samples[i].Repo,
+ Number: samples[i].Number,
+ UpdatedAt: samples[i].UpdatedAt,
+ }
}
// Create fetcher
@@ -208,7 +214,7 @@ func analyzeOrganization(ctx context.Context, org string, sampleSize, days int,
totalAuthors := github.CountUniqueAuthors(prs)
// Count open PRs across the entire organization with a single query
- totalOpenPRs, err := github.CountOpenPRsInOrg(ctx, org, token)
+ totalOpenPRs, err := client.CountOpenPRsInOrg(ctx, org, token)
if err != nil {
slog.Warn("Failed to count open PRs in organization, using 0", "error", err)
totalOpenPRs = 0
@@ -217,17 +223,17 @@ func analyzeOrganization(ctx context.Context, org string, sampleSize, days int,
// Convert PRSummary to PRSummaryInfo for extrapolation
prSummaryInfos := make([]cost.PRSummaryInfo, len(prs))
- for i, pr := range prs {
+ for i := range prs {
prSummaryInfos[i] = cost.PRSummaryInfo{
- Owner: pr.Owner,
- Repo: pr.Repo,
- Author: pr.Author,
- AuthorType: pr.AuthorType,
- CreatedAt: pr.CreatedAt,
- UpdatedAt: pr.UpdatedAt,
- ClosedAt: pr.ClosedAt,
- Merged: pr.Merged,
- State: pr.State,
+ Owner: prs[i].Owner,
+ Repo: prs[i].Repo,
+ Author: prs[i].Author,
+ AuthorType: prs[i].AuthorType,
+ CreatedAt: prs[i].CreatedAt,
+ UpdatedAt: prs[i].UpdatedAt,
+ ClosedAt: prs[i].ClosedAt,
+ Merged: prs[i].Merged,
+ State: prs[i].State,
}
}
diff --git a/cmd/server/main.go b/cmd/server/main.go
index ad41a04..d5d145c 100644
--- a/cmd/server/main.go
+++ b/cmd/server/main.go
@@ -107,6 +107,10 @@ func main() {
// Create server
prcostServer := server.New()
+ if prcostServer == nil {
+ logger.ErrorContext(ctx, "failed to initialize server (check cache configuration)")
+ os.Exit(1)
+ }
prcostServer.SetCommit(GitCommit)
prcostServer.SetCORSConfig(*corsOrigins, *allowAllCors)
prcostServer.SetRateLimit(*rateLimit, *rateBurst)
diff --git a/go.mod b/go.mod
index 8b04883..da65e67 100644
--- a/go.mod
+++ b/go.mod
@@ -1,13 +1,22 @@
module github.com/codeGROOVE-dev/prcost
-go 1.25.3
+go 1.25.4
require (
- github.com/codeGROOVE-dev/ds9 v0.6.0
+ github.com/codeGROOVE-dev/fido v1.10.0
+ github.com/codeGROOVE-dev/fido/pkg/store/cloudrun v1.10.0
github.com/codeGROOVE-dev/gsm v0.0.0-20251019065141-833fe2363d22
- github.com/codeGROOVE-dev/prx v0.0.0-20251030022101-ff906928a1e4
- github.com/codeGROOVE-dev/turnclient v0.0.0-20251030022425-bc3b14acf75e
+ github.com/codeGROOVE-dev/prx v0.0.0-20251109164430-90488144076d
+ github.com/codeGROOVE-dev/turnclient v0.0.0-20251107215141-ee43672b3dc7
golang.org/x/time v0.14.0
)
-require github.com/codeGROOVE-dev/retry v1.3.0 // indirect
+require (
+ github.com/codeGROOVE-dev/ds9 v0.8.0 // indirect
+ github.com/codeGROOVE-dev/fido/pkg/store/compress v1.10.0 // indirect
+ github.com/codeGROOVE-dev/fido/pkg/store/datastore v1.10.0 // indirect
+ github.com/codeGROOVE-dev/fido/pkg/store/localfs v1.10.0 // indirect
+ github.com/codeGROOVE-dev/retry v1.3.0 // indirect
+ github.com/klauspost/compress v1.18.2 // indirect
+ github.com/puzpuzpuz/xsync/v4 v4.2.0 // indirect
+)
diff --git a/go.sum b/go.sum
index 75f4de4..7949921 100644
--- a/go.sum
+++ b/go.sum
@@ -1,12 +1,28 @@
-github.com/codeGROOVE-dev/ds9 v0.6.0 h1:JG7vBH17UAKaVoeQilrIvA1I0fg3iNbdUMBSDS7ixgI=
-github.com/codeGROOVE-dev/ds9 v0.6.0/go.mod h1:0UDipxF1DADfqM5GtjefgB2u+EXdDgOKmxVvrSGLHoM=
+github.com/codeGROOVE-dev/ds9 v0.8.0 h1:A23VvL1YzUBZyXNYmF5u0R6nPcxQitPeLo8FFk6OiUs=
+github.com/codeGROOVE-dev/ds9 v0.8.0/go.mod h1:0UDipxF1DADfqM5GtjefgB2u+EXdDgOKmxVvrSGLHoM=
+github.com/codeGROOVE-dev/fido v1.10.0 h1:i4Wb6LDd5nD/4Fnp47KAVUVhG1O1mN5jSRbCYPpBYjw=
+github.com/codeGROOVE-dev/fido v1.10.0/go.mod h1:/mqfMeKCTYTGt/Y0cWm6gh8gYBKG1w8xBsTDmu+A/pU=
+github.com/codeGROOVE-dev/fido/pkg/store/cloudrun v1.10.0 h1:0Wvs3JE+TI8GsEkh0jg0SglyFyIkBewPSl0PTUSVqEo=
+github.com/codeGROOVE-dev/fido/pkg/store/cloudrun v1.10.0/go.mod h1:MaxO6QGv89FrZB1D+stiZjRcbaMUfiw7yYGkaqOoJ2k=
+github.com/codeGROOVE-dev/fido/pkg/store/compress v1.10.0 h1:W3AYtR6eyPHQ8QhTsuqjNZYWk/Fev0cJiAiuw04uhlk=
+github.com/codeGROOVE-dev/fido/pkg/store/compress v1.10.0/go.mod h1:0hFYQ8Y6jfrYuJb8eBimYz66tg7DDuVWbZqaI944LQM=
+github.com/codeGROOVE-dev/fido/pkg/store/datastore v1.10.0 h1:vCsLeESGQvW7F8pJJimZhRjzWmrQg1WZgT22om9fT/Q=
+github.com/codeGROOVE-dev/fido/pkg/store/datastore v1.10.0/go.mod h1:LtpO9TUi92D7uLBXJu+kLWVpRmEtVRAWVB2EdzNU0JQ=
+github.com/codeGROOVE-dev/fido/pkg/store/localfs v1.10.0 h1:oaPwuHHBuzhsWnPm7UCxgwjz7+jG3O0JenSSgPSwqv8=
+github.com/codeGROOVE-dev/fido/pkg/store/localfs v1.10.0/go.mod h1:zUGzODSWykosAod0IHycxdxUOMcd2eVqd6eUdOsU73E=
github.com/codeGROOVE-dev/gsm v0.0.0-20251019065141-833fe2363d22 h1:gtN3rOc6YspO646BkcOxBhPjEqKUz+jl175jIqglfDg=
github.com/codeGROOVE-dev/gsm v0.0.0-20251019065141-833fe2363d22/go.mod h1:KV+w19ubP32PxZPE1hOtlCpTaNpF0Bpb32w5djO8UTg=
-github.com/codeGROOVE-dev/prx v0.0.0-20251030022101-ff906928a1e4 h1:DSuoUwP3oyR4cHrX0cUh9c7CtYjXNIcyCmqpIwHilIU=
-github.com/codeGROOVE-dev/prx v0.0.0-20251030022101-ff906928a1e4/go.mod h1:FEy3gz9IYDXWnKWkoDSL+pWu6rujxbBSrF4w5A8QSK0=
+github.com/codeGROOVE-dev/prx v0.0.0-20251109164430-90488144076d h1:KKt93PVYR9Uga8uLPq0HoNlXVW3BTPHGBBxEb5YBxf4=
+github.com/codeGROOVE-dev/prx v0.0.0-20251109164430-90488144076d/go.mod h1:FEy3gz9IYDXWnKWkoDSL+pWu6rujxbBSrF4w5A8QSK0=
github.com/codeGROOVE-dev/retry v1.3.0 h1:/+ipAWRJLL6y1R1vprYo0FSjSBvH6fE5j9LKXjpD54g=
github.com/codeGROOVE-dev/retry v1.3.0/go.mod h1:8OgefgV1XP7lzX2PdKlCXILsYKuz6b4ZpHa/20iLi8E=
-github.com/codeGROOVE-dev/turnclient v0.0.0-20251030022425-bc3b14acf75e h1:WXHdC8o5KmP5CwkQRiGVywYzsj93fjkRPq7clhfZPq0=
-github.com/codeGROOVE-dev/turnclient v0.0.0-20251030022425-bc3b14acf75e/go.mod h1:dVS3MlJDgL6WkfurJAyS7I9Fe1yxxoxxarjVifY5bIo=
+github.com/codeGROOVE-dev/turnclient v0.0.0-20251107215141-ee43672b3dc7 h1:183q0bj2y/9hh/K0HZvDXI6sG7liYSRcQVgFx0GY+UA=
+github.com/codeGROOVE-dev/turnclient v0.0.0-20251107215141-ee43672b3dc7/go.mod h1:dVS3MlJDgL6WkfurJAyS7I9Fe1yxxoxxarjVifY5bIo=
+github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk=
+github.com/klauspost/compress v1.18.2/go.mod h1:R0h/fSBs8DE4ENlcrlib3PsXS61voFxhIs2DeRhCvJ4=
+github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU=
+github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
+github.com/puzpuzpuz/xsync/v4 v4.2.0 h1:dlxm77dZj2c3rxq0/XNvvUKISAmovoXF4a4qM6Wvkr0=
+github.com/puzpuzpuz/xsync/v4 v4.2.0/go.mod h1:VJDmTCJMBt8igNxnkQd86r+8KUeN1quSfNKu5bLYFQo=
golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=
golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=
diff --git a/hacks/debug-sessions/main.go b/hacks/debug-sessions/main.go
index 3dc4849..9fb0ed1 100644
--- a/hacks/debug-sessions/main.go
+++ b/hacks/debug-sessions/main.go
@@ -64,7 +64,7 @@ func main() {
events := make([]cost.ParticipantEvent, len(authorEvents))
copy(events, authorEvents)
// Sort manually
- for i := 0; i < len(events); i++ {
+ for i := range events {
for j := i + 1; j < len(events); j++ {
if events[j].Timestamp.Before(events[i].Timestamp) {
events[i], events[j] = events[j], events[i]
@@ -105,10 +105,7 @@ func main() {
// Gaps between events
for j := start; j < end; j++ {
gap := events[j+1].Timestamp.Sub(events[j].Timestamp)
- counted := gap
- if gap > eventDur {
- counted = eventDur
- }
+ counted := min(gap, eventDur)
totalGitHub += counted
fmt.Printf(" Gap %d->%d: %v (actual: %v)\n", j-start, j-start+1, counted, gap)
}
diff --git a/hacks/test-sessions/main.go b/hacks/test-sessions/main.go
index 032ed30..6785122 100644
--- a/hacks/test-sessions/main.go
+++ b/hacks/test-sessions/main.go
@@ -40,7 +40,7 @@ func main() {
copy(sorted, authorEvents)
// Simple sort
- for i := 0; i < len(sorted); i++ {
+ for i := range sorted {
for j := i + 1; j < len(sorted); j++ {
if sorted[j].Timestamp.Before(sorted[i].Timestamp) {
sorted[i], sorted[j] = sorted[j], sorted[i]
diff --git a/internal/server/integration_test.go b/internal/server/integration_test.go
index 05c56a7..8933707 100644
--- a/internal/server/integration_test.go
+++ b/internal/server/integration_test.go
@@ -74,8 +74,8 @@ func TestOrgSampleStreamIntegration(t *testing.T) {
for scanner.Scan() {
line := scanner.Text()
- if strings.HasPrefix(line, "data: ") {
- currentData = strings.TrimPrefix(line, "data: ")
+ if after, ok := strings.CutPrefix(line, "data: "); ok {
+ currentData = after
} else if line == "" && currentData != "" {
// Empty line marks end of SSE event
var event ProgressUpdate
diff --git a/internal/server/server.go b/internal/server/server.go
index 7bea048..dc8a9d0 100644
--- a/internal/server/server.go
+++ b/internal/server/server.go
@@ -20,7 +20,8 @@ import (
"sync"
"time"
- "github.com/codeGROOVE-dev/ds9/pkg/datastore"
+ "github.com/codeGROOVE-dev/fido"
+ "github.com/codeGROOVE-dev/fido/pkg/store/cloudrun"
"github.com/codeGROOVE-dev/gsm"
"github.com/codeGROOVE-dev/prcost/pkg/cost"
"github.com/codeGROOVE-dev/prcost/pkg/github"
@@ -55,38 +56,6 @@ var tokenPattern = regexp.MustCompile(
//go:embed static/*
var staticFS embed.FS
-// cacheEntry holds cached data for in-memory cache.
-// No TTL needed - Cloud Run kills processes frequently, providing natural cache invalidation.
-type cacheEntry struct {
- data any
-}
-
-// prDataCacheEntity represents a cached PR data entry in DataStore with TTL.
-type prDataCacheEntity struct {
- Data string `datastore:"data,noindex"` // JSON-encoded cost.PRData
- CachedAt time.Time `datastore:"cached_at"` // When this was cached
- ExpiresAt time.Time `datastore:"expires_at"` // When this expires (1 hour from CachedAt)
- URL string `datastore:"url"` // PR URL for debugging
-}
-
-// prQueryCacheEntity represents a cached PR query result in DataStore with TTL.
-type prQueryCacheEntity struct {
- Data string `datastore:"data,noindex"` // JSON-encoded []github.PRSummary
- CachedAt time.Time `datastore:"cached_at"` // When this was cached
- ExpiresAt time.Time `datastore:"expires_at"` // When this expires (varies by type)
- QueryType string `datastore:"query_type"` // "repo" or "org"
- QueryKey string `datastore:"query_key"` // Full query key for debugging
-}
-
-// calcResultCacheEntity represents a cached calculation result in DataStore with TTL.
-type calcResultCacheEntity struct {
- Data string `datastore:"data,noindex"` // JSON-encoded cost.Breakdown
- CachedAt time.Time `datastore:"cached_at"` // When this was cached
- ExpiresAt time.Time `datastore:"expires_at"` // When this expires
- URL string `datastore:"url"` // PR URL for debugging
- ConfigKey string `datastore:"config_key"` // Config hash for debugging
-}
-
// Server handles HTTP requests for the PR Cost API.
//
//nolint:govet // fieldalignment: struct field ordering optimized for readability over memory
@@ -109,15 +78,11 @@ type Server struct {
allowAllCors bool
validateTokens bool
r2rCallout bool
- // In-memory caching for PR queries and data.
- prQueryCache map[string]*cacheEntry
- prDataCache map[string]*cacheEntry
- calcResultCache map[string]*cacheEntry
- prQueryCacheMu sync.RWMutex
- prDataCacheMu sync.RWMutex
- calcResultCacheMu sync.RWMutex
- // DataStore client for persistent caching (nil if not enabled).
- dsClient *datastore.Client
+ // Caching using fido (memory + optional persistence).
+ githubCache *fido.TieredCache[string, any] // Unified 72h cache for all GitHub queries
+ prDataCache *fido.TieredCache[string, cost.PRData] // 6-day cache for PR detail data
+ calcResultCache *fido.TieredCache[string, cost.Breakdown] // 6-day cache for calculation results
+ githubClient *github.Client // Cached GitHub API client
}
// CalculateRequest represents a request to calculate PR costs.
@@ -208,6 +173,61 @@ func New() *Server {
logger.InfoContext(ctx, "Server initialized with CSRF protection enabled")
+ // Get database name from environment, default to "prcost" if not set.
+ dbName := os.Getenv("DATASTORE_DB")
+ if dbName == "" {
+ dbName = "prcost"
+ }
+
+ // Initialize caches with fido (automatically handles memory + persistence).
+ // cloudrun.New uses Cloud Datastore when K_SERVICE is set (Cloud Run/Knative), local files otherwise.
+ // Single unified cache with 72-hour TTL for all GitHub query results.
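+ // If any cache fails to initialize, New returns nil; cmd/server/main.go checks for nil and exits.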
+ githubPersist, err := cloudrun.New[string, any](ctx, dbName)
+ if err != nil {
+ logger.ErrorContext(ctx, "Failed to initialize GitHub cache persistence", "error", err, "db", dbName)
+ return nil
+ }
+ githubCache, err := fido.NewTiered[string, any](githubPersist,
+ fido.TTL(72*time.Hour),
+ fido.Size(2000),
+ )
+ if err != nil {
+ logger.ErrorContext(ctx, "Failed to initialize GitHub cache", "error", err)
+ return nil
+ }
+
+ // Separate caches for non-GitHub data with 6-day TTL
+ prDataPersist, err := cloudrun.New[string, cost.PRData](ctx, dbName)
+ if err != nil {
+ logger.ErrorContext(ctx, "Failed to initialize PR data cache persistence", "error", err, "db", dbName)
+ return nil
+ }
+ prDataCache, err := fido.NewTiered[string, cost.PRData](prDataPersist,
+ fido.TTL(6*24*time.Hour),
+ fido.Size(1000),
+ )
+ if err != nil {
+ logger.ErrorContext(ctx, "Failed to initialize PR data cache", "error", err)
+ return nil
+ }
+
+ calcResultPersist, err := cloudrun.New[string, cost.Breakdown](ctx, dbName)
+ if err != nil {
+ logger.ErrorContext(ctx, "Failed to initialize calc result cache persistence", "error", err, "db", dbName)
+ return nil
+ }
+ calcResultCache, err := fido.NewTiered[string, cost.Breakdown](calcResultPersist,
+ fido.TTL(6*24*time.Hour),
+ fido.Size(1000),
+ )
+ if err != nil {
+ logger.ErrorContext(ctx, "Failed to initialize calc result cache", "error", err)
+ return nil
+ }
+
+ // Simple cache adapter for GitHub client
+ simpleGitHubCache := &simpleCache{cache: githubCache, logger: logger}
+
server := &Server{
logger: logger,
serverCommit: "", // Will be set via build flags
@@ -217,9 +237,10 @@ func New() *Server {
ipLimiters: make(map[string]*rate.Limiter),
rateLimit: DefaultRateLimit,
rateBurst: DefaultRateBurst,
- prQueryCache: make(map[string]*cacheEntry),
- prDataCache: make(map[string]*cacheEntry),
- calcResultCache: make(map[string]*cacheEntry),
+ githubCache: githubCache,
+ prDataCache: prDataCache,
+ calcResultCache: calcResultCache,
+ githubClient: github.NewClient(simpleGitHubCache),
}
// Load GitHub token at startup and cache in memory for performance and billing.
@@ -231,27 +252,6 @@ func New() *Server {
logger.InfoContext(ctx, "No fallback token available - requests must provide Authorization header")
}
- // Note: We don't clear caches periodically because:
- // - PR data is immutable (closed PRs don't change)
- // - Memory usage is bounded by request patterns
- // - Cloud Run instances are ephemeral and restart frequently anyway
- // If needed in the future, implement LRU eviction with size limits instead of time-based clearing
-
- // Initialize DataStore client if DATASTORE_DB is set (persistent caching across restarts).
- if dbID := os.Getenv("DATASTORE_DB"); dbID != "" {
- dsClient, err := datastore.NewClientWithDatabase(ctx, "", dbID)
- if err != nil {
- logger.WarnContext(ctx, "Failed to initialize DataStore client - persistent caching disabled",
- "database_id", dbID, "error", err)
- } else {
- server.dsClient = dsClient
- logger.InfoContext(ctx, "DataStore persistent caching enabled",
- "database_id", dbID)
- }
- } else {
- logger.InfoContext(ctx, "DataStore persistent caching disabled (DATASTORE_DB not set)")
- }
-
return server
}
@@ -273,7 +273,7 @@ func (s *Server) SetCORSConfig(origins string, allowAll bool) {
s.allowAllCors = false
if origins != "" {
- for _, origin := range strings.Split(origins, ",") {
+ for origin := range strings.SplitSeq(origins, ",") {
origin = strings.TrimSpace(origin)
// Validate wildcard patterns: must be *.domain.com or https://*.domain.com
@@ -321,23 +321,23 @@ func (s *Server) SetR2RCallout(enabled bool) {
// limiter returns a rate limiter for the given IP address.
func (s *Server) limiter(ctx context.Context, ip string) *rate.Limiter {
s.ipLimitersMu.RLock()
- limiter, exists := s.ipLimiters[ip]
+ lim, ok := s.ipLimiters[ip]
s.ipLimitersMu.RUnlock()
- if exists {
- return limiter
+ if ok {
+ return lim
}
s.ipLimitersMu.Lock()
defer s.ipLimitersMu.Unlock()
// Double-check after acquiring write lock.
- if existingLimiter, exists := s.ipLimiters[ip]; exists {
- return existingLimiter
+ if l, ok := s.ipLimiters[ip]; ok {
+ return l
}
- limiter = rate.NewLimiter(rate.Limit(s.rateLimit), s.rateBurst)
- s.ipLimiters[ip] = limiter
+ lim = rate.NewLimiter(rate.Limit(s.rateLimit), s.rateBurst)
+ s.ipLimiters[ip] = lim
// Cleanup old limiters if map grows too large (prevent memory leak).
const maxLimiters = 10000
@@ -354,204 +354,58 @@ func (s *Server) limiter(ctx context.Context, ip string) *rate.Limiter {
s.logger.InfoContext(ctx, "Cleaned up old IP rate limiters", "removed", count, "remaining", len(s.ipLimiters))
}
- return limiter
+ return lim
}
-// cachedPRQuery retrieves cached PR query results from memory first, then DataStore as fallback.
-func (s *Server) cachedPRQuery(ctx context.Context, key string) ([]github.PRSummary, bool) {
- // Check in-memory cache first (fast path).
- s.prQueryCacheMu.RLock()
- entry, exists := s.prQueryCache[key]
- s.prQueryCacheMu.RUnlock()
-
- if exists {
- prs, ok := entry.data.([]github.PRSummary)
- if ok {
- s.logger.DebugContext(ctx, "PR query cache hit (memory)", "key", key)
- return prs, true
- }
- }
-
- // Memory miss - try DataStore if available.
- if s.dsClient == nil {
- return nil, false
- }
+// simpleCache implements github.Cache interface using a single fido.TieredCache[string, any].
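+// It adapts fido's (value, found, error) Get to the (value, found) shape the GitHub client expects, logging errors and treating them as cache misses.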
+type simpleCache struct {
+ cache *fido.TieredCache[string, any]
+ logger *slog.Logger
+}
- dsKey := datastore.NameKey("PRQueryCache", key, nil)
- var entity prQueryCacheEntity
- err := s.dsClient.Get(ctx, dsKey, &entity)
+func (s *simpleCache) Get(ctx context.Context, key string) (any, bool) {
+ key = sanitizeCacheKey(key)
+ val, found, err := s.cache.Get(ctx, key)
if err != nil {
- if !errors.Is(err, datastore.ErrNoSuchEntity) {
- s.logger.WarnContext(ctx, "DataStore cache read failed", "key", key, "error", err)
- }
+ s.logger.WarnContext(ctx, "Cache error", "key", key, "error", err)
return nil, false
}
-
- // Check if expired (TTL varies by query type).
- if time.Now().After(entity.ExpiresAt) {
- s.logger.DebugContext(ctx, "DataStore cache entry expired", "key", key, "expires_at", entity.ExpiresAt)
- return nil, false
- }
-
- // Deserialize the cached data.
- var prs []github.PRSummary
- if err := json.Unmarshal([]byte(entity.Data), &prs); err != nil {
- s.logger.WarnContext(ctx, "Failed to deserialize cached PR query", "key", key, "error", err)
- return nil, false
- }
-
- s.logger.InfoContext(ctx, "PR query cache hit (DataStore)",
- "key", key, "query_type", entity.QueryType, "cached_at", entity.CachedAt, "pr_count", len(prs))
-
- // Populate in-memory cache for faster subsequent access.
- s.prQueryCacheMu.Lock()
- s.prQueryCache[key] = &cacheEntry{data: prs}
- s.prQueryCacheMu.Unlock()
-
- return prs, true
+ return val, found
}
-// cachePRQuery stores PR query results in both memory and DataStore caches.
-func (s *Server) cachePRQuery(ctx context.Context, key string, prs []github.PRSummary) {
- // Write to in-memory cache first (fast path).
- s.prQueryCacheMu.Lock()
- s.prQueryCache[key] = &cacheEntry{data: prs}
- s.prQueryCacheMu.Unlock()
-
- // Write to DataStore if available (persistent cache).
- if s.dsClient == nil {
- return
- }
-
- // Serialize the PR query results.
- dataJSON, err := json.Marshal(prs)
- if err != nil {
- s.logger.WarnContext(ctx, "Failed to serialize PR query for DataStore", "key", key, "error", err)
- return
- }
-
- // Determine query type and TTL from key format.
- var queryType string
- var ttl time.Duration
- switch {
- case strings.HasPrefix(key, "repo:"):
- queryType = "repo"
- ttl = 60 * time.Hour // 60 hours for repo queries
- case strings.HasPrefix(key, "org:"):
- queryType = "org"
- ttl = 60 * time.Hour // 60 hours for org queries
- default:
- s.logger.WarnContext(ctx, "Unknown query type for key, using default TTL", "key", key)
- queryType = "unknown"
- ttl = 60 * time.Hour // Default to 60 hours
- }
-
- now := time.Now()
- entity := prQueryCacheEntity{
- Data: string(dataJSON),
- CachedAt: now,
- ExpiresAt: now.Add(ttl),
- QueryType: queryType,
- QueryKey: key,
- }
-
- dsKey := datastore.NameKey("PRQueryCache", key, nil)
- if _, err := s.dsClient.Put(ctx, dsKey, &entity); err != nil {
- s.logger.WarnContext(ctx, "Failed to write PR query to DataStore", "key", key, "error", err)
- return
+func (s *simpleCache) Set(ctx context.Context, key string, value any) {
+ key = sanitizeCacheKey(key)
+ if err := s.cache.Set(ctx, key, value); err != nil {
+ s.logger.WarnContext(ctx, "Failed to cache value", "key", key, "error", err)
}
-
- s.logger.DebugContext(ctx, "PR query cached to DataStore",
- "key", key, "query_type", queryType, "ttl", ttl, "expires_at", entity.ExpiresAt, "pr_count", len(prs))
}
-// cachedPRData retrieves cached PR data from memory first, then DataStore as fallback.
+// cachedPRData retrieves cached PR data using fido.
func (s *Server) cachedPRData(ctx context.Context, key string) (cost.PRData, bool) {
- // Check in-memory cache first (fast path).
- s.prDataCacheMu.RLock()
- entry, exists := s.prDataCache[key]
- s.prDataCacheMu.RUnlock()
-
- if exists {
- prData, ok := entry.data.(cost.PRData)
- if ok {
- s.logger.DebugContext(ctx, "PR data cache hit (memory)", "key", key)
- return prData, true
- }
- }
-
- // Memory miss - try DataStore if available.
- if s.dsClient == nil {
- return cost.PRData{}, false
- }
-
- dsKey := datastore.NameKey("PRDataCache", key, nil)
- var entity prDataCacheEntity
- err := s.dsClient.Get(ctx, dsKey, &entity)
+ key = sanitizeCacheKey(key)
+ prData, found, err := s.prDataCache.Get(ctx, key)
if err != nil {
- if !errors.Is(err, datastore.ErrNoSuchEntity) {
- s.logger.WarnContext(ctx, "DataStore cache read failed", "key", key, "error", err)
- }
- return cost.PRData{}, false
- }
-
- // Check if expired (1 hour TTL for PRs).
- if time.Now().After(entity.ExpiresAt) {
- s.logger.DebugContext(ctx, "DataStore cache entry expired", "key", key, "expires_at", entity.ExpiresAt)
+ s.logger.WarnContext(ctx, "PR data cache error", "key", key, "error", err)
return cost.PRData{}, false
}
-
- // Deserialize the cached data.
- var prData cost.PRData
- if err := json.Unmarshal([]byte(entity.Data), &prData); err != nil {
- s.logger.WarnContext(ctx, "Failed to deserialize cached PR data", "key", key, "error", err)
- return cost.PRData{}, false
- }
-
- s.logger.InfoContext(ctx, "PR data cache hit (DataStore)", "key", key, "cached_at", entity.CachedAt)
-
- // Populate in-memory cache for faster subsequent access.
- s.prDataCacheMu.Lock()
- s.prDataCache[key] = &cacheEntry{data: prData}
- s.prDataCacheMu.Unlock()
-
- return prData, true
+ return prData, found
}
-// cachePRData stores PR data in both memory and DataStore caches.
+// cachePRData stores PR data using fido.
func (s *Server) cachePRData(ctx context.Context, key string, prData cost.PRData) {
- // Write to in-memory cache first (fast path).
- s.prDataCacheMu.Lock()
- s.prDataCache[key] = &cacheEntry{data: prData}
- s.prDataCacheMu.Unlock()
-
- // Write to DataStore if available (persistent cache).
- if s.dsClient == nil {
- return
- }
-
- // Serialize the PR data.
- dataJSON, err := json.Marshal(prData)
- if err != nil {
- s.logger.WarnContext(ctx, "Failed to serialize PR data for DataStore", "key", key, "error", err)
- return
- }
-
- now := time.Now()
- entity := prDataCacheEntity{
- Data: string(dataJSON),
- CachedAt: now,
- ExpiresAt: now.Add(1 * time.Hour), // 1 hour TTL for PRs
- URL: key,
- }
-
- dsKey := datastore.NameKey("PRDataCache", key, nil)
- if _, err := s.dsClient.Put(ctx, dsKey, &entity); err != nil {
- s.logger.WarnContext(ctx, "Failed to write PR data to DataStore", "key", key, "error", err)
- return
+ key = sanitizeCacheKey(key)
+ if err := s.prDataCache.Set(ctx, key, prData); err != nil {
+ s.logger.WarnContext(ctx, "Failed to cache PR data", "key", key, "error", err)
}
+}
- s.logger.DebugContext(ctx, "PR data cached to DataStore", "key", key, "expires_at", entity.ExpiresAt)
+// sanitizeCacheKey replaces characters not allowed by the fido persistence layer.
+// fido only allows: alphanumeric, dash, underscore, period, colon.
+// We replace: / with _ and = with -.
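+// For example, "pr:https://github.com/owner/repo/pull/1" becomes "pr:https:__github.com_owner_repo_pull_1".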
+func sanitizeCacheKey(key string) string {
+ key = strings.ReplaceAll(key, "/", "_")
+ key = strings.ReplaceAll(key, "=", "-")
+ return key
}
// configHash creates a deterministic hash key for a cost.Config.
@@ -568,94 +422,23 @@ func configHash(cfg cost.Config) string {
cfg.DeliveryDelayFactor)
}
-// cachedCalcResult retrieves cached calculation result from memory first, then DataStore as fallback.
+// cachedCalcResult retrieves cached calculation result using fido.
func (s *Server) cachedCalcResult(ctx context.Context, prURL string, cfg cost.Config) (cost.Breakdown, bool) {
- key := fmt.Sprintf("calc:%s:%s", prURL, configHash(cfg))
-
- // Check in-memory cache first (fast path).
- s.calcResultCacheMu.RLock()
- entry, exists := s.calcResultCache[key]
- s.calcResultCacheMu.RUnlock()
-
- if exists {
- breakdown, ok := entry.data.(cost.Breakdown)
- if ok {
- return breakdown, true
- }
- }
-
- // Memory miss - try DataStore if available.
- if s.dsClient == nil {
- return cost.Breakdown{}, false
- }
-
- dsKey := datastore.NameKey("CalcResultCache", key, nil)
- var entity calcResultCacheEntity
- err := s.dsClient.Get(ctx, dsKey, &entity)
+ key := sanitizeCacheKey(fmt.Sprintf("calc:%s:%s", prURL, configHash(cfg)))
+ breakdown, found, err := s.calcResultCache.Get(ctx, key)
if err != nil {
- if !errors.Is(err, datastore.ErrNoSuchEntity) {
- s.logger.WarnContext(ctx, "DataStore calc cache read failed", "key", key, "error", err)
- }
+ s.logger.WarnContext(ctx, "Calc result cache error", "key", key, "error", err)
return cost.Breakdown{}, false
}
-
- // Check if expired.
- if time.Now().After(entity.ExpiresAt) {
- return cost.Breakdown{}, false
- }
-
- // Deserialize the cached data.
- var breakdown cost.Breakdown
- if err := json.Unmarshal([]byte(entity.Data), &breakdown); err != nil {
- s.logger.WarnContext(ctx, "Failed to deserialize cached calc result", "key", key, "error", err)
- return cost.Breakdown{}, false
- }
-
- // Populate in-memory cache for faster subsequent access.
- s.calcResultCacheMu.Lock()
- s.calcResultCache[key] = &cacheEntry{data: breakdown}
- s.calcResultCacheMu.Unlock()
-
- return breakdown, true
+ return breakdown, found
}
-// cacheCalcResult stores calculation result in both memory and DataStore caches.
-func (s *Server) cacheCalcResult(ctx context.Context, prURL string, cfg cost.Config, b *cost.Breakdown, ttl time.Duration) {
- key := fmt.Sprintf("calc:%s:%s", prURL, configHash(cfg))
-
- // Write to in-memory cache first (fast path).
- s.calcResultCacheMu.Lock()
- s.calcResultCache[key] = &cacheEntry{data: *b}
- s.calcResultCacheMu.Unlock()
-
- // Write to DataStore if available (persistent cache).
- if s.dsClient == nil {
- return
- }
-
- // Serialize the calculation result.
- dataJSON, err := json.Marshal(b)
- if err != nil {
- s.logger.WarnContext(ctx, "Failed to serialize calc result for DataStore", "key", key, "error", err)
- return
- }
-
- now := time.Now()
- entity := calcResultCacheEntity{
- Data: string(dataJSON),
- CachedAt: now,
- ExpiresAt: now.Add(ttl),
- URL: prURL,
- ConfigKey: configHash(cfg),
+// cacheCalcResult stores calculation result using fido.
+func (s *Server) cacheCalcResult(ctx context.Context, prURL string, cfg cost.Config, b *cost.Breakdown) {
+ key := sanitizeCacheKey(fmt.Sprintf("calc:%s:%s", prURL, configHash(cfg)))
+ if err := s.calcResultCache.Set(ctx, key, *b); err != nil {
+ s.logger.WarnContext(ctx, "Failed to cache calc result", "key", key, "error", err)
}
-
- dsKey := datastore.NameKey("CalcResultCache", key, nil)
- if _, err := s.dsClient.Put(ctx, dsKey, &entity); err != nil {
- s.logger.WarnContext(ctx, "Failed to write calc result to DataStore", "key", key, "error", err)
- return
- }
-
- s.logger.DebugContext(ctx, "Calc result cached to DataStore", "key", key, "ttl", ttl, "expires_at", entity.ExpiresAt)
}
// SetTokenValidation configures GitHub token validation.
@@ -673,8 +456,24 @@ func (s *Server) SetTokenValidation(appID string, keyFile string) error {
}
// Shutdown gracefully shuts down the server.
-func (*Server) Shutdown() {
- // Nothing to do - in-memory structures will be garbage collected.
+func (s *Server) Shutdown() {
+ ctx := context.Background()
+ // Close fido instances to flush any pending writes.
+ if s.githubCache != nil {
+ if err := s.githubCache.Close(); err != nil {
+ s.logger.WarnContext(ctx, "Failed to close GitHub cache", "error", err)
+ }
+ }
+ if s.prDataCache != nil {
+ if err := s.prDataCache.Close(); err != nil {
+ s.logger.WarnContext(ctx, "Failed to close PR data cache", "error", err)
+ }
+ }
+ if s.calcResultCache != nil {
+ if err := s.calcResultCache.Close(); err != nil {
+ s.logger.WarnContext(ctx, "Failed to close calc result cache", "error", err)
+ }
+ }
}
// sanitizeError removes tokens from error messages before logging.
@@ -955,11 +754,11 @@ func (*Server) extractToken(r *http.Request) string {
}
// Support "Bearer token" and "token token" formats.
- if strings.HasPrefix(auth, "Bearer ") {
- return strings.TrimPrefix(auth, "Bearer ")
+ if after, ok := strings.CutPrefix(auth, "Bearer "); ok {
+ return after
}
- if strings.HasPrefix(auth, "token ") {
- return strings.TrimPrefix(auth, "token ")
+ if after, ok := strings.CutPrefix(auth, "token "); ok {
+ return after
}
return auth
@@ -1091,8 +890,8 @@ func (s *Server) processRequest(ctx context.Context, req *CalculateRequest, toke
// Calculate costs.
breakdown = cost.Calculate(prData, cfg)
- // Cache the calculation result with 1 hour TTL for direct PR requests
- s.cacheCalcResult(ctx, req.URL, cfg, &breakdown, 1*time.Hour)
+ // Cache the calculation result
+ s.cacheCalcResult(ctx, req.URL, cfg, &breakdown)
return &CalculateResponse{
Breakdown: breakdown,
@@ -1568,27 +1367,15 @@ func (s *Server) processRepoSample(ctx context.Context, req *RepoSampleRequest,
// Calculate since date
since := time.Now().AddDate(0, 0, -req.Days)
- // Try cache first
- cacheKey := fmt.Sprintf("repo:%s/%s:days=%d", req.Owner, req.Repo, req.Days)
- prs, cached := s.cachedPRQuery(ctx, cacheKey)
- if cached {
- s.logger.InfoContext(ctx, "Using cached PR query results",
- "owner", req.Owner, "repo", req.Repo, "total_prs", len(prs))
- } else {
- // Fetch all PRs modified since the date
- var err error
- prs, err = github.FetchPRsFromRepo(ctx, req.Owner, req.Repo, since, token, nil)
- if err != nil {
- return nil, fmt.Errorf("failed to fetch PRs: %w", err)
- }
-
- s.logger.InfoContext(ctx, "Fetched PRs from repository",
- "owner", req.Owner, "repo", req.Repo, "total_prs", len(prs))
-
- // Cache query results
- s.cachePRQuery(ctx, cacheKey, prs)
+ // Fetch all PRs modified since the date with caching
+ prs, err := s.githubClient.FetchPRsFromRepo(ctx, req.Owner, req.Repo, since, token, nil)
+ if err != nil {
+ return nil, fmt.Errorf("failed to fetch PRs: %w", err)
}
+ s.logger.InfoContext(ctx, "Fetched PRs from repository",
+ "owner", req.Owner, "repo", req.Repo, "total_prs", len(prs))
+
if len(prs) == 0 {
return nil, fmt.Errorf("no PRs found in the last %d days", req.Days)
}
@@ -1603,11 +1390,11 @@ func (s *Server) processRepoSample(ctx context.Context, req *RepoSampleRequest,
// Collect breakdowns from each sample and aggregate seconds_in_state
var breakdowns []cost.Breakdown
aggregatedSeconds := make(map[string]int)
- for i, pr := range samples {
- prURL := fmt.Sprintf("https://github.com/%s/%s/pull/%d", req.Owner, req.Repo, pr.Number)
+ for i := range samples {
+ prURL := fmt.Sprintf("https://github.com/%s/%s/pull/%d", req.Owner, req.Repo, samples[i].Number)
s.logger.InfoContext(ctx, "Processing sample PR",
"repo", fmt.Sprintf("%s/%s", req.Owner, req.Repo),
- "number", pr.Number,
+ "number", samples[i].Number,
"progress", fmt.Sprintf("%d/%d", i+1, len(samples)))
// Try cache first
@@ -1619,16 +1406,16 @@ func (s *Server) processRepoSample(ctx context.Context, req *RepoSampleRequest,
// Use configured data source with updatedAt for effective caching
if s.dataSource == "turnserver" {
var prDataWithAnalysis github.PRDataWithAnalysis
- prDataWithAnalysis, err = github.FetchPRDataWithAnalysisViaTurnserver(ctx, prURL, token, pr.UpdatedAt)
+ prDataWithAnalysis, err = github.FetchPRDataWithAnalysisViaTurnserver(ctx, prURL, token, samples[i].UpdatedAt)
if err == nil {
prData = prDataWithAnalysis.PRData
secondsInState = prDataWithAnalysis.Analysis.SecondsInState
}
} else {
- prData, err = github.FetchPRData(ctx, prURL, token, pr.UpdatedAt)
+ prData, err = github.FetchPRData(ctx, prURL, token, samples[i].UpdatedAt)
}
if err != nil {
- s.logger.WarnContext(ctx, "Failed to fetch PR data, skipping", "pr_number", pr.Number, "source", s.dataSource, errorKey, err)
+ s.logger.WarnContext(ctx, "Failed to fetch PR data, skipping", "pr_number", samples[i].Number, "source", s.dataSource, errorKey, err)
continue
}
@@ -1653,7 +1440,7 @@ func (s *Server) processRepoSample(ctx context.Context, req *RepoSampleRequest,
totalAuthors := github.CountUniqueAuthors(prs)
// Query for actual count of open PRs (not extrapolated from samples)
- openPRCount, err := github.CountOpenPRsInRepo(ctx, req.Owner, req.Repo, token)
+ openPRCount, err := s.githubClient.CountOpenPRsInRepo(ctx, req.Owner, req.Repo, token)
if err != nil {
s.logger.WarnContext(ctx, "Failed to count open PRs, using 0", errorKey, err)
openPRCount = 0
@@ -1661,17 +1448,17 @@ func (s *Server) processRepoSample(ctx context.Context, req *RepoSampleRequest,
// Convert PRSummary to PRSummaryInfo for extrapolation
prSummaryInfos := make([]cost.PRSummaryInfo, len(prs))
- for i, pr := range prs {
+ for i := range prs {
prSummaryInfos[i] = cost.PRSummaryInfo{
- Owner: pr.Owner,
- Repo: pr.Repo,
- Author: pr.Author,
- AuthorType: pr.AuthorType,
- CreatedAt: pr.CreatedAt,
- UpdatedAt: pr.UpdatedAt,
- ClosedAt: pr.ClosedAt,
- Merged: pr.Merged,
- State: pr.State,
+ Owner: prs[i].Owner,
+ Repo: prs[i].Repo,
+ Author: prs[i].Author,
+ AuthorType: prs[i].AuthorType,
+ CreatedAt: prs[i].CreatedAt,
+ UpdatedAt: prs[i].UpdatedAt,
+ ClosedAt: prs[i].ClosedAt,
+ Merged: prs[i].Merged,
+ State: prs[i].State,
}
}
@@ -1704,33 +1491,21 @@ func (s *Server) processOrgSample(ctx context.Context, req *OrgSampleRequest, to
// Calculate since date
since := time.Now().AddDate(0, 0, -req.Days)
- // Try cache first
- cacheKey := fmt.Sprintf("org:%s:days=%d", req.Org, req.Days)
- prs, cached := s.cachedPRQuery(ctx, cacheKey)
- if cached {
- s.logger.InfoContext(ctx, "Using cached PR query results",
- "org", req.Org, "total_prs", len(prs))
- } else {
- // Fetch all PRs across the org modified since the date
- var err error
- prs, err = github.FetchPRsFromOrg(ctx, req.Org, since, token, nil)
- if err != nil {
- return nil, fmt.Errorf("failed to fetch PRs: %w", err)
- }
-
- s.logger.InfoContext(ctx, "Fetched PRs from organization", "org", req.Org, "total_prs", len(prs))
-
- // Cache query results
- s.cachePRQuery(ctx, cacheKey, prs)
+ // Fetch all PRs across the org with caching
+ prs, err := s.githubClient.FetchPRsFromOrg(ctx, req.Org, since, token, nil)
+ if err != nil {
+ return nil, fmt.Errorf("failed to fetch PRs: %w", err)
}
+ s.logger.InfoContext(ctx, "Fetched PRs from organization", "org", req.Org, "total_prs", len(prs))
+
if len(prs) == 0 {
return nil, fmt.Errorf("no PRs found in the last %d days", req.Days)
}
// Fetch repository visibility for the organization (2x the time period for comprehensive coverage)
reposSince := time.Now().AddDate(0, 0, -req.Days*2)
- repoVisibilityData, err := github.FetchOrgRepositoriesWithActivity(ctx, req.Org, reposSince, token)
+ repoVisibilityData, err := s.githubClient.FetchOrgRepositoriesWithActivity(ctx, req.Org, reposSince, token)
if err != nil {
s.logger.WarnContext(ctx, "Failed to fetch repository visibility, assuming all public", "error", err)
repoVisibilityData = nil
@@ -1755,11 +1530,11 @@ func (s *Server) processOrgSample(ctx context.Context, req *OrgSampleRequest, to
// Collect breakdowns from each sample and aggregate seconds_in_state
var breakdowns []cost.Breakdown
aggregatedSeconds := make(map[string]int)
- for i, pr := range samples {
- prURL := fmt.Sprintf("https://github.com/%s/%s/pull/%d", pr.Owner, pr.Repo, pr.Number)
+ for i := range samples {
+ prURL := fmt.Sprintf("https://github.com/%s/%s/pull/%d", samples[i].Owner, samples[i].Repo, samples[i].Number)
s.logger.InfoContext(ctx, "Processing sample PR",
- "repo", fmt.Sprintf("%s/%s", pr.Owner, pr.Repo),
- "number", pr.Number,
+ "repo", fmt.Sprintf("%s/%s", samples[i].Owner, samples[i].Repo),
+ "number", samples[i].Number,
"progress", fmt.Sprintf("%d/%d", i+1, len(samples)))
// Try cache first
@@ -1771,16 +1546,16 @@ func (s *Server) processOrgSample(ctx context.Context, req *OrgSampleRequest, to
// Use configured data source with updatedAt for effective caching
if s.dataSource == "turnserver" {
var prDataWithAnalysis github.PRDataWithAnalysis
- prDataWithAnalysis, err = github.FetchPRDataWithAnalysisViaTurnserver(ctx, prURL, token, pr.UpdatedAt)
+ prDataWithAnalysis, err = github.FetchPRDataWithAnalysisViaTurnserver(ctx, prURL, token, samples[i].UpdatedAt)
if err == nil {
prData = prDataWithAnalysis.PRData
secondsInState = prDataWithAnalysis.Analysis.SecondsInState
}
} else {
- prData, err = github.FetchPRData(ctx, prURL, token, pr.UpdatedAt)
+ prData, err = github.FetchPRData(ctx, prURL, token, samples[i].UpdatedAt)
}
if err != nil {
- s.logger.WarnContext(ctx, "Failed to fetch PR data, skipping", "pr_number", pr.Number, "source", s.dataSource, errorKey, err)
+ s.logger.WarnContext(ctx, "Failed to fetch PR data, skipping", "pr_number", samples[i].Number, "source", s.dataSource, errorKey, err)
continue
}
@@ -1804,8 +1579,8 @@ func (s *Server) processOrgSample(ctx context.Context, req *OrgSampleRequest, to
// Count unique authors across all PRs (not just samples)
totalAuthors := github.CountUniqueAuthors(prs)
- // Count open PRs across the entire organization with a single query
- totalOpenPRs, err := github.CountOpenPRsInOrg(ctx, req.Org, token)
+ // Count open PRs across the entire organization
+ totalOpenPRs, err := s.githubClient.CountOpenPRsInOrg(ctx, req.Org, token)
if err != nil {
s.logger.WarnContext(ctx, "Failed to count open PRs in organization, using 0", errorKey, err)
totalOpenPRs = 0
@@ -1814,17 +1589,17 @@ func (s *Server) processOrgSample(ctx context.Context, req *OrgSampleRequest, to
// Convert PRSummary to PRSummaryInfo for extrapolation
prSummaryInfos := make([]cost.PRSummaryInfo, len(prs))
- for i, pr := range prs {
+ for i := range prs {
prSummaryInfos[i] = cost.PRSummaryInfo{
- Owner: pr.Owner,
- Repo: pr.Repo,
- Author: pr.Author,
- AuthorType: pr.AuthorType,
- CreatedAt: pr.CreatedAt,
- UpdatedAt: pr.UpdatedAt,
- ClosedAt: pr.ClosedAt,
- Merged: pr.Merged,
- State: pr.State,
+ Owner: prs[i].Owner,
+ Repo: prs[i].Repo,
+ Author: prs[i].Author,
+ AuthorType: prs[i].AuthorType,
+ CreatedAt: prs[i].CreatedAt,
+ UpdatedAt: prs[i].UpdatedAt,
+ ClosedAt: prs[i].ClosedAt,
+ Merged: prs[i].Merged,
+ State: prs[i].State,
}
}
@@ -2131,53 +1906,45 @@ func (s *Server) processRepoSampleWithProgress(ctx context.Context, req *RepoSam
// Calculate since date
since := time.Now().AddDate(0, 0, -req.Days)
- // Try cache first
- cacheKey := fmt.Sprintf("repo:%s/%s:days=%d", req.Owner, req.Repo, req.Days)
- prs, cached := s.cachedPRQuery(ctx, cacheKey)
- if !cached {
- // Send progress update before GraphQL query
+ // Send progress update before GraphQL query
+ logSSEError(ctx, s.logger, sendSSE(writer, ProgressUpdate{
+ Type: "fetching",
+ PR: 0,
+ Owner: req.Owner,
+ Repo: req.Repo,
+ Progress: fmt.Sprintf("Querying GitHub GraphQL API for %s/%s PRs (last %d days)...", req.Owner, req.Repo, req.Days),
+ }))
+
+ // Start keep-alive to prevent client timeout during GraphQL query
+ stopKeepAlive, connErr := startKeepAlive(writer)
+ defer close(stopKeepAlive)
+
+ // Check for connection errors in background
+ go func() {
+ if err := <-connErr; err != nil {
+ s.logger.WarnContext(ctx, "Client connection lost", errorKey, err)
+ }
+ }()
+
+ // Fetch all PRs with caching
+ progressCallback := func(queryName string, page int, prCount int) {
logSSEError(ctx, s.logger, sendSSE(writer, ProgressUpdate{
Type: "fetching",
PR: 0,
Owner: req.Owner,
Repo: req.Repo,
- Progress: fmt.Sprintf("Querying GitHub GraphQL API for %s/%s PRs (last %d days)...", req.Owner, req.Repo, req.Days),
+ Progress: fmt.Sprintf("Fetching %s PRs (page %d, %d PRs found)...", queryName, page, prCount),
}))
+ }
- // Start keep-alive to prevent client timeout during GraphQL query
- stopKeepAlive, connErr := startKeepAlive(writer)
- defer close(stopKeepAlive)
-
- // Check for connection errors in background
- go func() {
- if err := <-connErr; err != nil {
- s.logger.WarnContext(ctx, "Client connection lost", errorKey, err)
- }
- }()
-
- // Fetch all PRs modified since the date with progress updates
- var err error
- progressCallback := func(queryName string, page int, prCount int) {
- logSSEError(ctx, s.logger, sendSSE(writer, ProgressUpdate{
- Type: "fetching",
- PR: 0,
- Owner: req.Owner,
- Repo: req.Repo,
- Progress: fmt.Sprintf("Fetching %s PRs (page %d, %d PRs found)...", queryName, page, prCount),
- }))
- }
- //nolint:contextcheck // Using background context intentionally to prevent client timeout from canceling work
- prs, err = github.FetchPRsFromRepo(workCtx, req.Owner, req.Repo, since, token, progressCallback)
- if err != nil {
- logSSEError(ctx, s.logger, sendSSE(writer, ProgressUpdate{
- Type: "error",
- Error: fmt.Sprintf("Failed to fetch PRs: %v", err),
- }))
- return
- }
-
- // Cache query results
- s.cachePRQuery(ctx, cacheKey, prs)
+ //nolint:contextcheck // Using background context intentionally to prevent client timeout from canceling work
+ prs, err := s.githubClient.FetchPRsFromRepo(workCtx, req.Owner, req.Repo, since, token, progressCallback)
+ if err != nil {
+ logSSEError(ctx, s.logger, sendSSE(writer, ProgressUpdate{
+ Type: "error",
+ Error: fmt.Sprintf("Failed to fetch PRs: %v", err),
+ }))
+ return
}
if len(prs) == 0 {
@@ -2219,7 +1986,7 @@ func (s *Server) processRepoSampleWithProgress(ctx context.Context, req *RepoSam
// Query for actual count of open PRs (not extrapolated from samples)
//nolint:contextcheck // Using background context intentionally to prevent client timeout from canceling work
- openPRCount, err := github.CountOpenPRsInRepo(workCtx, req.Owner, req.Repo, token)
+ openPRCount, err := s.githubClient.CountOpenPRsInRepo(workCtx, req.Owner, req.Repo, token)
if err != nil {
s.logger.WarnContext(ctx, "Failed to count open PRs, using 0", errorKey, err)
openPRCount = 0
@@ -2227,17 +1994,17 @@ func (s *Server) processRepoSampleWithProgress(ctx context.Context, req *RepoSam
// Convert PRSummary to PRSummaryInfo for extrapolation
prSummaryInfos := make([]cost.PRSummaryInfo, len(prs))
- for i, pr := range prs {
+ for i := range prs {
prSummaryInfos[i] = cost.PRSummaryInfo{
- Owner: pr.Owner,
- Repo: pr.Repo,
- Author: pr.Author,
- AuthorType: pr.AuthorType,
- CreatedAt: pr.CreatedAt,
- UpdatedAt: pr.UpdatedAt,
- ClosedAt: pr.ClosedAt,
- Merged: pr.Merged,
- State: pr.State,
+ Owner: prs[i].Owner,
+ Repo: prs[i].Repo,
+ Author: prs[i].Author,
+ AuthorType: prs[i].AuthorType,
+ CreatedAt: prs[i].CreatedAt,
+ UpdatedAt: prs[i].UpdatedAt,
+ ClosedAt: prs[i].ClosedAt,
+ Merged: prs[i].Merged,
+ State: prs[i].State,
}
}
@@ -2290,51 +2057,43 @@ func (s *Server) processOrgSampleWithProgress(ctx context.Context, req *OrgSampl
// Calculate since date
since := time.Now().AddDate(0, 0, -req.Days)
- // Try cache first
- cacheKey := fmt.Sprintf("org:%s:days=%d", req.Org, req.Days)
- prs, cached := s.cachedPRQuery(ctx, cacheKey)
- if !cached {
- // Send progress update before GraphQL query
+ // Send progress update before GraphQL query
+ logSSEError(ctx, s.logger, sendSSE(writer, ProgressUpdate{
+ Type: "fetching",
+ PR: 0,
+ Progress: fmt.Sprintf("Querying GitHub Search API for %s org PRs (last %d days)...", req.Org, req.Days),
+ }))
+
+ // Start keep-alive to prevent client timeout during GraphQL query
+ stopKeepAlive, connErr := startKeepAlive(writer)
+ defer close(stopKeepAlive)
+
+ // Check for connection errors in background
+ go func() {
+ if err := <-connErr; err != nil {
+ s.logger.WarnContext(ctx, "Client connection lost", errorKey, err)
+ }
+ }()
+
+ // Fetch all PRs across the org with caching
+ progressCallback := func(queryName string, page int, prCount int) {
logSSEError(ctx, s.logger, sendSSE(writer, ProgressUpdate{
Type: "fetching",
PR: 0,
- Progress: fmt.Sprintf("Querying GitHub Search API for %s org PRs (last %d days)...", req.Org, req.Days),
+ Owner: req.Org,
+ Repo: "",
+ Progress: fmt.Sprintf("Fetching %s PRs (page %d, %d PRs found)...", queryName, page, prCount),
}))
+ }
- // Start keep-alive to prevent client timeout during GraphQL query
- stopKeepAlive, connErr := startKeepAlive(writer)
- defer close(stopKeepAlive)
-
- // Check for connection errors in background
- go func() {
- if err := <-connErr; err != nil {
- s.logger.WarnContext(ctx, "Client connection lost", errorKey, err)
- }
- }()
-
- // Fetch all PRs across the org modified since the date with progress updates
- var err error
- progressCallback := func(queryName string, page int, prCount int) {
- logSSEError(ctx, s.logger, sendSSE(writer, ProgressUpdate{
- Type: "fetching",
- PR: 0,
- Owner: req.Org,
- Repo: "",
- Progress: fmt.Sprintf("Fetching %s PRs (page %d, %d PRs found)...", queryName, page, prCount),
- }))
- }
- //nolint:contextcheck // Using background context intentionally to prevent client timeout from canceling work
- prs, err = github.FetchPRsFromOrg(workCtx, req.Org, since, token, progressCallback)
- if err != nil {
- logSSEError(ctx, s.logger, sendSSE(writer, ProgressUpdate{
- Type: "error",
- Error: fmt.Sprintf("Failed to fetch PRs: %v", err),
- }))
- return
- }
-
- // Cache query results
- s.cachePRQuery(ctx, cacheKey, prs)
+ //nolint:contextcheck // Using background context intentionally to prevent client timeout from canceling work
+ prs, err := s.githubClient.FetchPRsFromOrg(workCtx, req.Org, since, token, progressCallback)
+ if err != nil {
+ logSSEError(ctx, s.logger, sendSSE(writer, ProgressUpdate{
+ Type: "error",
+ Error: fmt.Sprintf("Failed to fetch PRs: %v", err),
+ }))
+ return
}
if len(prs) == 0 {
@@ -2382,9 +2141,9 @@ func (s *Server) processOrgSampleWithProgress(ctx context.Context, req *OrgSampl
// Count unique authors across all PRs (not just samples)
totalAuthors := github.CountUniqueAuthors(prs)
- // Count open PRs across the entire organization with a single GraphQL query
+ // Count open PRs across the entire organization
//nolint:contextcheck // Using background context intentionally to prevent client timeout from canceling work
- totalOpenPRs, err := github.CountOpenPRsInOrg(workCtx, req.Org, token)
+ totalOpenPRs, err := s.githubClient.CountOpenPRsInOrg(workCtx, req.Org, token)
if err != nil {
s.logger.WarnContext(ctx, "Failed to count open PRs for organization", "org", req.Org, errorKey, err)
totalOpenPRs = 0 // Continue with 0 if we can't get the count
@@ -2393,17 +2152,17 @@ func (s *Server) processOrgSampleWithProgress(ctx context.Context, req *OrgSampl
// Convert PRSummary to PRSummaryInfo for extrapolation
prSummaryInfos := make([]cost.PRSummaryInfo, len(prs))
- for i, pr := range prs {
+ for i := range prs {
prSummaryInfos[i] = cost.PRSummaryInfo{
- Owner: pr.Owner,
- Repo: pr.Repo,
- Author: pr.Author,
- AuthorType: pr.AuthorType,
- CreatedAt: pr.CreatedAt,
- UpdatedAt: pr.UpdatedAt,
- ClosedAt: pr.ClosedAt,
- Merged: pr.Merged,
- State: pr.State,
+ Owner: prs[i].Owner,
+ Repo: prs[i].Repo,
+ Author: prs[i].Author,
+ AuthorType: prs[i].AuthorType,
+ CreatedAt: prs[i].CreatedAt,
+ UpdatedAt: prs[i].UpdatedAt,
+ ClosedAt: prs[i].ClosedAt,
+ Merged: prs[i].Merged,
+ State: prs[i].State,
}
}
@@ -2441,7 +2200,7 @@ func (s *Server) processPRsInParallel(workCtx, reqCtx context.Context, samples [
var wg sync.WaitGroup
totalSamples := len(samples)
- for idx, pr := range samples {
+ for idx := range samples {
wg.Add(1)
go func(index int, prSummary github.PRSummary) {
defer wg.Done()
@@ -2555,8 +2314,8 @@ func (s *Server) processPRsInParallel(workCtx, reqCtx context.Context, samples [
breakdown = cost.Calculate(prData, cfg)
- // Cache the calculation result with 1 week TTL for PRs from queries
- s.cacheCalcResult(workCtx, prURL, cfg, &breakdown, 7*24*time.Hour)
+ // Cache the calculation result
+ s.cacheCalcResult(workCtx, prURL, cfg, &breakdown)
// Add to results
mu.Lock()
@@ -2573,7 +2332,7 @@ func (s *Server) processPRsInParallel(workCtx, reqCtx context.Context, samples [
Progress: progress,
}))
sseMu.Unlock()
- }(idx, pr)
+ }(idx, samples[idx])
}
wg.Wait()
diff --git a/internal/server/server_test.go b/internal/server/server_test.go
index 906a233..8032b55 100644
--- a/internal/server/server_test.go
+++ b/internal/server/server_test.go
@@ -14,7 +14,6 @@ import (
"time"
"github.com/codeGROOVE-dev/prcost/pkg/cost"
- "github.com/codeGROOVE-dev/prcost/pkg/github"
)
func TestNew(t *testing.T) {
@@ -603,7 +602,8 @@ func TestCachePRDataMemory(t *testing.T) {
CreatedAt: time.Now(),
}
- key := "pr:https://github.com/owner/repo/pull/123"
+ // Use unique key with timestamp to avoid collision with persisted cache
+ key := fmt.Sprintf("test-pr:https://github.com/owner/repo/pull/123:ts=%d", time.Now().UnixNano())
// Initially should not be cached
_, cached := s.cachedPRData(ctx, key)
@@ -628,70 +628,6 @@ func TestCachePRDataMemory(t *testing.T) {
}
}
-func TestCachePRQueryMemory(t *testing.T) {
- s := New()
- ctx := testContext()
-
- prs := []github.PRSummary{
- {Number: 123, Owner: "owner", Repo: "repo", Author: "testuser", UpdatedAt: time.Now()},
- {Number: 456, Owner: "owner", Repo: "repo", Author: "testuser2", UpdatedAt: time.Now()},
- }
-
- key := "repo:owner/repo:days=30"
-
- // Initially should not be cached
- _, cached := s.cachedPRQuery(ctx, key)
- if cached {
- t.Error("PR query should not be cached initially")
- }
-
- // Cache the query results
- s.cachePRQuery(ctx, key, prs)
-
- // Should now be cached
- cachedPRs, cached := s.cachedPRQuery(ctx, key)
- if !cached {
- t.Error("PR query should be cached after caching")
- }
-
- if len(cachedPRs) != len(prs) {
- t.Errorf("Cached PR count = %d, want %d", len(cachedPRs), len(prs))
- }
- if cachedPRs[0].Number != prs[0].Number {
- t.Errorf("Cached PR number = %d, want %d", cachedPRs[0].Number, prs[0].Number)
- }
-}
-
-func TestCacheKeyPrefixes(t *testing.T) {
- s := New()
- ctx := testContext()
-
- // Test different key prefixes
- tests := []struct {
- name string
- key string
- }{
- {"PR key", "pr:https://github.com/owner/repo/pull/123"},
- {"Repo key", "repo:owner/repo:days=30"},
- {"Org key", "org:myorg:days=90"},
- }
-
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- prs := []github.PRSummary{{Number: 1}}
- s.cachePRQuery(ctx, tt.key, prs)
-
- cached, ok := s.cachedPRQuery(ctx, tt.key)
- if !ok {
- t.Errorf("Key %s should be cached", tt.key)
- }
- if len(cached) != 1 {
- t.Errorf("Expected 1 PR, got %d", len(cached))
- }
- })
- }
-}
-
func TestHandleCalculateInvalidJSON(t *testing.T) {
s := New()
@@ -2006,11 +1942,8 @@ func TestProcessRequestWithMock(t *testing.T) {
mockData := newMockPRData("test-author", 150, 3)
// Store in cache to simulate successful fetch
- s.prDataCacheMu.Lock()
- s.prDataCache["https://github.com/test/repo/pull/123"] = &cacheEntry{
- data: mockData,
- }
- s.prDataCacheMu.Unlock()
+ //nolint:errcheck // test setup - errors don't matter
+ _ = s.prDataCache.Set(ctx, "https://github.com/test/repo/pull/123", *mockData)
req := &CalculateRequest{
URL: "https://github.com/test/repo/pull/123",
@@ -2025,50 +1958,15 @@ func TestProcessRequestWithMock(t *testing.T) {
_ = err
}
-func TestCachedPRQueryHit(t *testing.T) {
- s := New()
- ctx := context.Background()
-
- // Pre-populate cache
- testPRs := newMockPRSummaries(5)
- key := "repo:owner/repo:30"
-
- s.prQueryCacheMu.Lock()
- s.prQueryCache[key] = &cacheEntry{data: testPRs}
- s.prQueryCacheMu.Unlock()
-
- // Test cache hit
- prs, found := s.cachedPRQuery(ctx, key)
- if !found {
- t.Error("Expected cache hit")
- }
- if len(prs) != 5 {
- t.Errorf("Expected 5 PRs, got %d", len(prs))
- }
-}
-
-func TestCachedPRQueryMiss(t *testing.T) {
- s := New()
- ctx := context.Background()
-
- // Test cache miss
- _, found := s.cachedPRQuery(ctx, "nonexistent-key")
- if found {
- t.Error("Expected cache miss")
- }
-}
-
func TestCachedPRDataHit(t *testing.T) {
s := New()
ctx := context.Background()
- // Pre-populate cache
+ // Pre-populate cache using the cache method (which sanitizes keys)
testData := newMockPRData("test-author", 100, 5)
key := "https://github.com/owner/repo/pull/123"
- s.prDataCacheMu.Lock()
- s.prDataCache[key] = &cacheEntry{data: *testData}
- s.prDataCacheMu.Unlock()
+ s.cachePRData(ctx, key, *testData)
// Test cache hit
data, found := s.cachedPRData(ctx, key)
@@ -2080,34 +1978,6 @@ func TestCachedPRDataHit(t *testing.T) {
}
}
-func TestCachePRQuery(t *testing.T) {
- s := New()
- ctx := context.Background()
-
- testPRs := newMockPRSummaries(3)
- key := "repo:owner/repo:30"
-
- // Cache the data
- s.cachePRQuery(ctx, key, testPRs)
-
- // Verify it was cached
- s.prQueryCacheMu.RLock()
- entry, exists := s.prQueryCache[key]
- s.prQueryCacheMu.RUnlock()
-
- if !exists {
- t.Error("Expected data to be cached")
- }
-
- if cached, ok := entry.data.([]github.PRSummary); ok {
- if len(cached) != 3 {
- t.Errorf("Expected 3 cached PRs, got %d", len(cached))
- }
- } else {
- t.Error("Cached data is not []github.PRSummary")
- }
-}
-
func TestCachePRData(t *testing.T) {
s := New()
ctx := context.Background()
@@ -2118,24 +1988,18 @@ func TestCachePRData(t *testing.T) {
// Cache the data
s.cachePRData(ctx, key, *testData)
- // Verify it was cached
- s.prDataCacheMu.RLock()
- entry, exists := s.prDataCache[key]
- s.prDataCacheMu.RUnlock()
+ // Verify it was cached by retrieving it via the cache method
+ cached, found := s.cachedPRData(ctx, key)
- if !exists {
+ if !found {
t.Error("Expected data to be cached")
}
- if cached, ok := entry.data.(cost.PRData); ok {
- if cached.Author != "author" {
- t.Errorf("Expected author 'author', got %s", cached.Author)
- }
- if cached.LinesAdded != 200 {
- t.Errorf("Expected 200 lines, got %d", cached.LinesAdded)
- }
- } else {
- t.Error("Cached data is not cost.PRData")
+ if cached.Author != "author" {
+ t.Errorf("Expected author 'author', got %s", cached.Author)
+ }
+ if cached.LinesAdded != 200 {
+ t.Errorf("Expected 200 lines, got %d", cached.LinesAdded)
}
}
@@ -2559,24 +2423,6 @@ func TestSanitizeErrorWithTokens(t *testing.T) {
}
}
-func TestCachePRQueryMemoryWrite(t *testing.T) {
- s := New()
- ctx := context.Background()
- testPRs := newMockPRSummaries(3)
- key := "test-cache-key"
-
- s.cachePRQuery(ctx, key, testPRs)
-
- // Verify it was cached in memory
- prs, found := s.cachedPRQuery(ctx, key)
- if !found {
- t.Fatal("Expected cache entry to be found")
- }
- if len(prs) != 3 {
- t.Errorf("Expected 3 PRs, got %d", len(prs))
- }
-}
-
func TestCachePRDataMemoryWrite(t *testing.T) {
s := New()
ctx := context.Background()
@@ -2608,37 +2454,8 @@ func TestCachedPRDataMissCache(t *testing.T) {
}
}
-func TestCachedPRQueryBadType(t *testing.T) {
- s := New()
- ctx := context.Background()
- key := "bad-type-key"
-
- // Store wrong type in cache
- s.prQueryCacheMu.Lock()
- s.prQueryCache[key] = &cacheEntry{data: "not a PR summary slice"}
- s.prQueryCacheMu.Unlock()
-
- _, found := s.cachedPRQuery(ctx, key)
- if found {
- t.Error("Expected cache miss for wrong type")
- }
-}
-
-func TestCachedPRDataBadType(t *testing.T) {
- s := New()
- ctx := context.Background()
- key := "bad-type-key"
-
- // Store wrong type in cache
- s.prDataCacheMu.Lock()
- s.prDataCache[key] = &cacheEntry{data: "not a PRData"}
- s.prDataCacheMu.Unlock()
-
- _, found := s.cachedPRData(ctx, key)
- if found {
- t.Error("Expected cache miss for wrong type")
- }
-}
+// TestCachedPRQueryBadType and TestCachedPRDataBadType removed:
+// fido uses generic types, so type mismatches are impossible at compile time.
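A sketch of why those bad-type tests no longer have anything to exercise: with a cache parameterized by its value type (which is what the comment above says fido provides), Get can only hand back the stored type, so a "wrong type in cache" state is unrepresentable. The cache below is purely illustrative and is not fido's actual API:

```go
package main

import (
	"fmt"
	"sync"
)

// TypedCache is a toy generic cache: the value type is fixed at compile time.
type TypedCache[V any] struct {
	mu sync.RWMutex
	m  map[string]V
}

func NewTypedCache[V any]() *TypedCache[V] {
	return &TypedCache[V]{m: make(map[string]V)}
}

func (c *TypedCache[V]) Set(key string, value V) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.m[key] = value
}

func (c *TypedCache[V]) Get(key string) (V, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	v, ok := c.m[key]
	return v, ok
}

func main() {
	c := NewTypedCache[int]()
	c.Set("answer", 42)
	// c.Set("answer", "oops") // would not compile: string is not int
	if v, ok := c.Get("answer"); ok {
		fmt.Println(v) // always an int; no type assertion needed
	}
}
```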
func TestLimiterCleanupLarge(t *testing.T) {
s := New()
diff --git a/internal/server/server_test_mocks.go b/internal/server/server_test_mocks.go
index b7b033f..0054b95 100644
--- a/internal/server/server_test_mocks.go
+++ b/internal/server/server_test_mocks.go
@@ -5,7 +5,6 @@ import (
"time"
"github.com/codeGROOVE-dev/prcost/pkg/cost"
- "github.com/codeGROOVE-dev/prcost/pkg/github"
)
// Helper functions to create test data.
@@ -25,17 +24,3 @@ func newMockPRData(author string, linesAdded int, eventCount int) *cost.PRData {
Events: events,
}
}
-
-func newMockPRSummaries(count int) []github.PRSummary {
- summaries := make([]github.PRSummary, count)
- for i := range count {
- summaries[i] = github.PRSummary{
- Number: i + 1,
- Owner: "test-owner",
- Repo: "test-repo",
- Author: fmt.Sprintf("author%d", i),
- UpdatedAt: time.Now().Add(-time.Duration(i) * time.Hour),
- }
- }
- return summaries
-}
diff --git a/internal/server/static/formatR2RCallout.js b/internal/server/static/formatR2RCallout.js
index 4e7f6a7..53dde84 100644
--- a/internal/server/static/formatR2RCallout.js
+++ b/internal/server/static/formatR2RCallout.js
@@ -1,34 +1,49 @@
// Extracted from index.html for testing purposes
-function formatR2RCallout(avgOpenHours, r2rSavings, currentEfficiency, modeledEfficiency, targetMergeHours = 1.5) {
- // Only show if average merge velocity is > target
- if (avgOpenHours <= targetMergeHours) {
- return '';
- }
+function formatR2RCallout(
+ avgOpenHours,
+ r2rSavings,
+ currentEfficiency,
+ modeledEfficiency,
+ targetMergeHours = 1.5
+) {
+ // Only show if average merge velocity is > target
+ if (avgOpenHours <= targetMergeHours) {
+ return "";
+ }
- // Format savings with appropriate precision
- let savingsText;
- if (r2rSavings >= 1000000) {
- savingsText = '$' + (r2rSavings / 1000000).toFixed(1) + 'M';
- } else if (r2rSavings >= 1000) {
- savingsText = '$' + (r2rSavings / 1000).toFixed(0) + 'K';
- } else {
- savingsText = '$' + r2rSavings.toFixed(0);
- }
+ // Format savings with appropriate precision
+ let savingsText;
+ if (r2rSavings >= 1000000) {
+ savingsText = `$${(r2rSavings / 1000000).toFixed(1)}M`;
+ } else if (r2rSavings >= 1000) {
+ savingsText = `$${(r2rSavings / 1000).toFixed(0)}K`;
+ } else {
+ savingsText = `$${r2rSavings.toFixed(0)}`;
+ }
- const efficiencyDelta = modeledEfficiency - currentEfficiency;
+ const efficiencyDelta = modeledEfficiency - currentEfficiency;
- // Format target merge time
- let targetText = targetMergeHours.toFixed(1) + 'h';
+ // Format target merge time
+ const targetText = `${targetMergeHours.toFixed(1)}h`;
- let html = '
';
- html += '
\uD83D\uDCA1 Pro-Tip: Boost team throughput by
' + efficiencyDelta.toFixed(1) + '% and save
' + savingsText + '/yr by reducing merge times to <' + targetText + ' with ';
- html += '
Ready to Review. ';
- html += 'Free for open-source repositories, $6/user/org for private repos.';
- html += '
';
- return html;
+ let html =
+ "";
+ html +=
+ "
\uD83D\uDCA1 Pro-Tip: Boost team throughput by
" +
+ efficiencyDelta.toFixed(1) +
+ "% and save
" +
+ savingsText +
+ "/yr by reducing merge times to <" +
+ targetText +
+ " with ";
+ html +=
+ '
Ready to Review. ';
+ html += "Free for open-source repositories, $6/user/org for private repos.";
+ html += "
";
+ return html;
}
// Export for testing (Node.js) or use globally (browser)
-if (typeof module !== 'undefined' && module.exports) {
- module.exports = { formatR2RCallout };
+if (typeof module !== "undefined" && module.exports) {
+ module.exports = { formatR2RCallout };
}
diff --git a/internal/server/static/formatR2RCallout.test.js b/internal/server/static/formatR2RCallout.test.js
index 4d08bee..324b141 100644
--- a/internal/server/static/formatR2RCallout.test.js
+++ b/internal/server/static/formatR2RCallout.test.js
@@ -1,101 +1,113 @@
// Simple test for formatR2RCallout function
// Run with: node formatR2RCallout.test.js
-const { formatR2RCallout } = require('./formatR2RCallout.js');
-const assert = require('assert');
+const { formatR2RCallout } = require("./formatR2RCallout.js");
+const assert = require("node:assert");
function test(description, fn) {
- try {
- fn();
- console.log('✓', description);
- } catch (err) {
- console.error('✗', description);
- console.error(' ', err.message);
- process.exit(1);
- }
+ try {
+ fn();
+ console.log("✓", description);
+ } catch (err) {
+ console.error("✗", description);
+ console.error(" ", err.message);
+ process.exit(1);
+ }
}
// Test 1: Should return empty string for fast PRs (≤1.5 hours by default)
-test('Returns empty for PRs with avgOpenHours <= 1.5 (default)', () => {
- const result = formatR2RCallout(0.5, 50000, 60, 70);
- assert.strictEqual(result, '');
+test("Returns empty for PRs with avgOpenHours <= 1.5 (default)", () => {
+ const result = formatR2RCallout(0.5, 50000, 60, 70);
+ assert.strictEqual(result, "");
});
-test('Returns empty for PRs with avgOpenHours = 1.5 (default)', () => {
- const result = formatR2RCallout(1.5, 50000, 60, 70);
- assert.strictEqual(result, '');
+test("Returns empty for PRs with avgOpenHours = 1.5 (default)", () => {
+ const result = formatR2RCallout(1.5, 50000, 60, 70);
+ assert.strictEqual(result, "");
});
// Test 2: Should render callout for slow PRs (>1.5 hours by default)
-test('Renders callout for PRs with avgOpenHours > 1.5 (default)', () => {
- const result = formatR2RCallout(10, 50000, 60, 70);
- assert(result.length > 0, 'Should return non-empty HTML');
+test("Renders callout for PRs with avgOpenHours > 1.5 (default)", () => {
+ const result = formatR2RCallout(10, 50000, 60, 70);
+ assert(result.length > 0, "Should return non-empty HTML");
});
// Test 3: Should contain "Pro-Tip:" text and throughput boost
test('Contains "Pro-Tip:" text and throughput boost', () => {
- const result = formatR2RCallout(10, 50000, 60, 70);
- assert(result.includes('💡'), 'Should contain lightbulb emoji');
- assert(result.includes('Pro-Tip:'), 'Should contain "Pro-Tip:"');
- assert(result.includes('Boost team throughput by'), 'Should contain throughput boost message');
- assert(result.includes('10.0%'), 'Should show efficiency delta of 10% (70 - 60)');
+ const result = formatR2RCallout(10, 50000, 60, 70);
+ assert(result.includes("💡"), "Should contain lightbulb emoji");
+ assert(result.includes("Pro-Tip:"), 'Should contain "Pro-Tip:"');
+ assert(result.includes("Boost team throughput by"), "Should contain throughput boost message");
+ assert(result.includes("10.0%"), "Should show efficiency delta of 10% (70 - 60)");
});
// Test 4: Should contain "Ready to Review" link
test('Contains "Ready to Review" link', () => {
- const result = formatR2RCallout(10, 50000, 60, 70);
- assert(result.includes('Ready to Review'), 'Should contain "Ready to Review"');
- assert(result.includes('href="https://codegroove.dev/products/ready-to-review/"'), 'Should link to Ready to Review page');
+ const result = formatR2RCallout(10, 50000, 60, 70);
+ assert(result.includes("Ready to Review"), 'Should contain "Ready to Review"');
+ assert(
+ result.includes('href="https://codegroove.dev/products/ready-to-review/"'),
+ "Should link to Ready to Review page"
+ );
});
// Test 5: Should contain OSS pricing message
-test('Contains OSS pricing message', () => {
- const result = formatR2RCallout(10, 50000, 60, 70);
- assert(result.includes('Free for open-source repositories'), 'Should contain OSS pricing message');
- assert(result.includes('$6/user/org for private repos'), 'Should contain private repo pricing');
+test("Contains OSS pricing message", () => {
+ const result = formatR2RCallout(10, 50000, 60, 70);
+ assert(
+ result.includes("Free for open-source repositories"),
+ "Should contain OSS pricing message"
+ );
+ assert(result.includes("$6/user/org for private repos"), "Should contain private repo pricing");
});
// Test 6: Should format savings in thousands (K)
-test('Formats savings with K suffix for thousands', () => {
- const result = formatR2RCallout(10, 50000, 60, 70);
- assert(result.includes('$50K/yr'), 'Should format $50,000 as $50K/yr');
+test("Formats savings with K suffix for thousands", () => {
+ const result = formatR2RCallout(10, 50000, 60, 70);
+ assert(result.includes("$50K/yr"), "Should format $50,000 as $50K/yr");
});
// Test 7: Should format savings in millions (M)
-test('Formats savings with M suffix for millions', () => {
- const result = formatR2RCallout(10, 2500000, 60, 70);
- assert(result.includes('$2.5M/yr'), 'Should format $2,500,000 as $2.5M/yr');
+test("Formats savings with M suffix for millions", () => {
+ const result = formatR2RCallout(10, 2500000, 60, 70);
+ assert(result.includes("$2.5M/yr"), "Should format $2,500,000 as $2.5M/yr");
});
// Test 8: Should format small savings without suffix
-test('Formats small savings without suffix', () => {
- const result = formatR2RCallout(10, 500, 60, 70);
- assert(result.includes('$500/yr'), 'Should format $500 as $500/yr');
+test("Formats small savings without suffix", () => {
+ const result = formatR2RCallout(10, 500, 60, 70);
+ assert(result.includes("$500/yr"), "Should format $500 as $500/yr");
});
// Test 9: Should contain "reducing merge times to <1.5h" (default)
-test('Contains merge time reduction message (default 1.5h)', () => {
- const result = formatR2RCallout(10, 50000, 60, 70);
- assert(result.includes('reducing merge times to <1.5h'), 'Should mention reducing merge times to <1.5h');
+test("Contains merge time reduction message (default 1.5h)", () => {
+ const result = formatR2RCallout(10, 50000, 60, 70);
+ assert(
+ result.includes("reducing merge times to <1.5h"),
+ "Should mention reducing merge times to <1.5h"
+ );
});
// Test 9b: Should use custom target merge time when provided
-test('Uses custom target merge time when provided', () => {
- const result = formatR2RCallout(10, 50000, 60, 70, 2.0);
- assert(result.includes('reducing merge times to <2.0h'), 'Should mention reducing merge times to <2.0h');
+test("Uses custom target merge time when provided", () => {
+ const result = formatR2RCallout(10, 50000, 60, 70, 2.0);
+ assert(
+ result.includes("reducing merge times to <2.0h"),
+ "Should mention reducing merge times to <2.0h"
+ );
});
// Test 10: Should contain proper HTML structure
-test('Contains proper HTML div wrapper', () => {
- const result = formatR2RCallout(10, 50000, 60, 70);
- assert(result.startsWith(''), 'Should end with
');
+test("Contains proper HTML div wrapper", () => {
+ const result = formatR2RCallout(10, 50000, 60, 70);
+ assert(result.startsWith(""), "Should end with
");
});
// Test 11: Should use green color scheme
-test('Uses green color scheme', () => {
- const result = formatR2RCallout(10, 50000, 60, 70);
- assert(result.includes('#00c853'), 'Should include green color #00c853');
+test("Uses green color scheme", () => {
+ const result = formatR2RCallout(10, 50000, 60, 70);
+ assert(result.includes("#00c853"), "Should include green color #00c853");
});
-console.log('\nAll tests passed! ✓');
+console.log("\nAll tests passed! ✓");
diff --git a/internal/server/static/index.html b/internal/server/static/index.html
index e35c5c9..f325e60 100644
--- a/internal/server/static/index.html
+++ b/internal/server/static/index.html
@@ -70,6 +70,34 @@
transition: all 0.3s ease;
}
+ a {
+ color: #007aff;
+ text-decoration: none;
+ position: relative;
+ transition: all 0.2s ease;
+ font-weight: 500;
+ }
+
+ a:hover {
+ text-decoration: none;
+ color: #0051d5;
+ }
+
+ a::after {
+ content: "";
+ position: absolute;
+ bottom: -2px;
+ left: 0;
+ width: 0;
+ height: 2px;
+ background: #ffcc00;
+ transition: width 0.3s cubic-bezier(0.68, -0.55, 0.265, 1.55);
+ }
+
+ a:hover::after {
+ width: 100%;
+ }
+
h1 a {
color: inherit;
text-decoration: none;
@@ -110,6 +138,14 @@
}
}
+ label {
+ display: block;
+ margin-bottom: 8px;
+ font-weight: 600;
+ color: #1d1d1f;
+ font-size: 14px;
+ }
+
.mode-selector {
display: flex;
gap: 12px;
@@ -207,14 +243,6 @@
}
}
- label {
- display: block;
- margin-bottom: 8px;
- font-weight: 600;
- color: #1d1d1f;
- font-size: 14px;
- }
-
.required-indicator {
color: #ff3b30;
margin-left: 2px;
@@ -485,6 +513,10 @@
display: none;
}
+ .result-section pre {
+ margin-top: 0;
+ }
+
#result {
margin-top: 30px;
margin-bottom: 30px;
@@ -525,6 +557,12 @@
line-height: 1.6;
}
+ .efficiency-callout strong,
+ .efficiency-callout-success strong {
+ color: #000;
+ font-weight: 600;
+ }
+
.result-summary .meta strong {
color: #1d1d1f;
font-weight: 600;
@@ -545,10 +583,6 @@
display: inline-block;
}
- .result-section pre {
- margin-top: 0;
- }
-
.efficiency-section {
background: linear-gradient(135deg, #f0f9ff 0%, #ffffff 100%);
padding: 20px;
@@ -679,12 +713,6 @@
margin-bottom: 0;
}
- .efficiency-callout strong,
- .efficiency-callout-success strong {
- color: #000;
- font-weight: 600;
- }
-
.efficiency-callout a,
.efficiency-callout-success a {
color: #007aff;
@@ -907,34 +935,6 @@
margin-bottom: 0;
}
- a {
- color: #007aff;
- text-decoration: none;
- position: relative;
- transition: all 0.2s ease;
- font-weight: 500;
- }
-
- a:hover {
- text-decoration: none;
- color: #0051d5;
- }
-
- a::after {
- content: "";
- position: absolute;
- bottom: -2px;
- left: 0;
- width: 0;
- height: 2px;
- background: #ffcc00;
- transition: width 0.3s cubic-bezier(0.68, -0.55, 0.265, 1.55);
- }
-
- a:hover::after {
- width: 100%;
- }
-
.footer a {
margin: 0 10px;
}
@@ -1231,6 +1231,7 @@ Why calculate PR costs?
});
});
+ // biome-ignore lint/correctness/noUnusedVariables: Used via onclick attribute in HTML
function toggleAdvanced() {
const config = document.getElementById('advancedConfig');
const btn = document.querySelector('.advanced-toggle');
@@ -1243,7 +1244,7 @@ Why calculate PR costs?
}
function formatCurrency(amount) {
- return '$' + amount.toFixed(2).replace(/\B(?=(\d{3})+(?!\d))/g, ',');
+ return `$${amount.toFixed(2).replace(/\B(?=(\d{3})+(?!\d))/g, ',')}`;
}
function formatLOC(kloc) {
@@ -1251,7 +1252,7 @@ Why calculate PR costs?
// For values < 1k LOC, just show LOC count without 'k' suffix
if (loc < 1000) {
- return Math.floor(loc) + ' LOC';
+ return `${Math.floor(loc)} LOC`;
}
// For values >= 100k, add commas (e.g., "1,517k" instead of "1517k")
@@ -1266,47 +1267,47 @@ Why calculate PR costs?
if (kloc < 1000.0 && fracPart >= 0.05) {
return `${intStr}.${Math.floor(fracPart * 10)}k LOC`;
}
- return intStr + 'k LOC';
+ return `${intStr}k LOC`;
}
// For values < 100k, use existing precision logic
if (kloc < 0.1 && kloc > 0) {
- return kloc.toFixed(2) + 'k LOC';
+ return `${kloc.toFixed(2)}k LOC`;
}
if (kloc < 1.0) {
- return kloc.toFixed(1) + 'k LOC';
+ return `${kloc.toFixed(1)}k LOC`;
}
if (kloc < 10.0) {
- return kloc.toFixed(1) + 'k LOC';
+ return `${kloc.toFixed(1)}k LOC`;
}
- return Math.floor(kloc) + 'k LOC';
+ return `${Math.floor(kloc)}k LOC`;
}
function formatTimeUnit(hours) {
if (hours < 1) {
- return (hours * 60).toFixed(1) + 'm';
+ return `${(hours * 60).toFixed(1)}m`;
}
if (hours < 48) {
- return hours.toFixed(1) + 'h';
+ return `${hours.toFixed(1)}h`;
}
const days = hours / 24;
if (days < 14) {
- return days.toFixed(1) + 'd';
+ return `${days.toFixed(1)}d`;
}
const weeks = days / 7;
if (weeks < 8) {
- return weeks.toFixed(1) + 'w';
+ return `${weeks.toFixed(1)}w`;
}
const months = days / 30;
if (months < 24) {
- return months.toFixed(1) + 'mo';
+ return `${months.toFixed(1)}mo`;
}
const years = days / 365;
- return years.toFixed(1) + 'y';
+ return `${years.toFixed(1)}y`;
}
// Ledger formatting functions - all output must use these for consistency
@@ -1328,50 +1329,51 @@ Why calculate PR costs?
return ` ${paddedLabel} ${formatCurrency(cost).padStart(15)} ${paddedTimeUnit} ${detail}\n`;
}
- // formatSummaryLine formats a summary line (like Preventable Loss Total) with 2-space indent
+ // biome-ignore lint/correctness/noUnusedVariables: Reserved for future use
function formatSummaryLine(label, cost, timeUnit, detail) {
const paddedLabel = label.padEnd(30);
const paddedTimeUnit = timeUnit.padEnd(6);
return ` ${paddedLabel} ${formatCurrency(cost).padStart(15)} ${paddedTimeUnit} ${detail}\n`;
}
- // formatTotalLine formats a total line with 2-space indent
+ // biome-ignore lint/correctness/noUnusedVariables: Reserved for future use
function formatTotalLine(label, cost, timeUnit) {
const paddedLabel = label.padEnd(30);
const paddedTimeUnit = timeUnit.padEnd(6);
return ` ${paddedLabel} ${formatCurrency(cost).padStart(15)} ${paddedTimeUnit}\n`;
}
- // formatSectionDivider formats the divider line under subtotals
+ // biome-ignore lint/correctness/noUnusedVariables: Reserved for future use
function formatSectionDivider() {
return ' ──────────────\n';
}
+ // biome-ignore lint/correctness/noUnusedVariables: Reserved for future use
function formatEngTimeUnit(hours) {
if (hours < 1) {
- return (hours * 60).toFixed(1) + 'm';
+ return `${(hours * 60).toFixed(1)}m`;
}
if (hours < 48) {
- return hours.toFixed(1) + 'h';
+ return `${hours.toFixed(1)}h`;
}
const days = hours / 24;
if (days < 14) {
- return days.toFixed(1) + 'd';
+ return `${days.toFixed(1)}d`;
}
const weeks = days / 7;
if (weeks < 8) {
- return weeks.toFixed(1) + 'w';
+ return `${weeks.toFixed(1)}w`;
}
const months = days / 30;
if (months < 24) {
- return months.toFixed(1) + 'mo';
+ return `${months.toFixed(1)}mo`;
}
const years = days / 365;
- return years.toFixed(1) + 'y';
+ return `${years.toFixed(1)}y`;
}
function efficiencyGrade(efficiencyPct) {
@@ -1426,7 +1428,7 @@ Why calculate PR costs?
}
}
- function formatEfficiencyHTML(efficiencyPct, grade, message, preventableCost, preventableHours, totalCost, totalHours, avgOpenHours, isAnnual = false, annualWasteCost = 0, annualWasteHours = 0, wasteHoursPerWeek = 0, wasteCostPerWeek = 0, wasteHoursPerAuthorPerWeek = 0, wasteCostPerAuthorPerWeek = 0, totalAuthors = 0, salary = 250000, benefitsMultiplier = 1.2, analysisType = 'project', sourceName = '', mergeRate = 0, mergedPRs = 0, unmergedPRs = 0, velocityGrade = '', velocityMessage = '', mergeRateGrade = '', mergeRateMessage = '', days = 60) {
+ function formatEfficiencyHTML(efficiencyPct, grade, message, _preventableCost, _preventableHours, _totalCost, _totalHours, avgOpenHours, isAnnual = false, annualWasteCost = 0, _annualWasteHours = 0, _wasteHoursPerWeek = 0, _wasteCostPerWeek = 0, _wasteHoursPerAuthorPerWeek = 0, _wasteCostPerAuthorPerWeek = 0, _totalAuthors = 0, salary = 250000, benefitsMultiplier = 1.2, _analysisType = 'project', _sourceName = '', mergeRate = 0, mergedPRs = 0, unmergedPRs = 0, velocityGrade = '', velocityMessage = '', mergeRateGradeParam = '', mergeRateMessage = '', days = 60) {
let html = '';
// Development Efficiency box
@@ -1450,14 +1452,14 @@ Why calculate PR costs?
html += `${formatTimeUnit(avgOpenHours)}`;
html += '';
html += `${velocityGradeObj.message}
`;
- const cutoffDays = parseInt(days) * 2;
+ const cutoffDays = parseInt(days, 10) * 2;
html += `Excludes open PRs created >${cutoffDays}d ago
`;
html += ''; // Close efficiency-box
// Merge Success Rate box (if data available) - use backend-computed grades if provided
if (mergedPRs + unmergedPRs > 0) {
- const mergeRateGradeObj = mergeRateGrade && mergeRateMessage
- ? { grade: mergeRateGrade, message: mergeRateMessage }
+ const mergeRateGradeObj = mergeRateGradeParam && mergeRateMessage
+ ? { grade: mergeRateGradeParam, message: mergeRateMessage }
: mergeRateGrade(mergeRate);
html += '';
html += '
Merge Success
';
@@ -1476,7 +1478,7 @@ Why calculate PR costs?
html += '
Projected Annual Waste
';
html += '
';
const annualWasteRounded = Math.round(annualWasteCost);
- const annualWasteFormatted = '$' + annualWasteRounded.toLocaleString('en-US');
+ const annualWasteFormatted = `$${annualWasteRounded.toLocaleString('en-US')}`;
html += `${annualWasteFormatted}`;
html += '
';
const annualCostPerHead = salary * benefitsMultiplier;
@@ -1498,20 +1500,20 @@ Why calculate PR costs?
// Format savings with appropriate precision
let savingsText;
if (r2rSavings >= 1000000) {
- savingsText = '$' + (r2rSavings / 1000000).toFixed(1) + 'M';
+ savingsText = `$${(r2rSavings / 1000000).toFixed(1)}M`;
} else if (r2rSavings >= 1000) {
- savingsText = '$' + (r2rSavings / 1000).toFixed(0) + 'K';
+ savingsText = `$${(r2rSavings / 1000).toFixed(0)}K`;
} else {
- savingsText = '$' + r2rSavings.toFixed(0);
+ savingsText = `$${r2rSavings.toFixed(0)}`;
}
const efficiencyDelta = modeledEfficiency - currentEfficiency;
// Format target merge time
- let targetText = targetMergeHours.toFixed(1) + 'h';
+ const targetText = `${targetMergeHours.toFixed(1)}h`;
let html = '
';
- html += '
\uD83D\uDCA1 Pro-Tip: Boost team throughput by
' + efficiencyDelta.toFixed(1) + '% and save
' + savingsText + '/yr by reducing merge times to <' + targetText + ' with ';
+ html += `
\uD83D\uDCA1 Pro-Tip: Boost team throughput by
${efficiencyDelta.toFixed(1)}% and save
${savingsText}/yr by reducing merge times to <${targetText} with `;
html += '
Ready to Review. ';
html += 'Free for open-source repositories, $6/user/org for private repos.';
html += '
';
@@ -1527,21 +1529,21 @@ Why calculate PR costs?
// Format savings with appropriate precision
let savingsText;
if (modeledSavings >= 1000000) {
- savingsText = '$' + (modeledSavings / 1000000).toFixed(1) + 'M';
+ savingsText = `$${(modeledSavings / 1000000).toFixed(1)}M`;
} else if (modeledSavings >= 1000) {
- savingsText = '$' + (modeledSavings / 1000).toFixed(0) + 'K';
+ savingsText = `$${(modeledSavings / 1000).toFixed(0)}K`;
} else {
- savingsText = '$' + modeledSavings.toFixed(0);
+ savingsText = `$${modeledSavings.toFixed(0)}`;
}
const efficiencyDelta = modeledEfficiency - currentEfficiency;
let throughputText = '';
if (efficiencyDelta > 0) {
- throughputText = ' (+' + efficiencyDelta.toFixed(1) + '% throughput)';
+ throughputText = ` (+${efficiencyDelta.toFixed(1)}% throughput)`;
}
let html = '
';
- html += '💡 Merge Time Modeling: If you lowered your average merge time to 1.5h, you would save ~' + savingsText + '/yr in engineering overhead' + throughputText + '.';
+ html += `💡 Merge Time Modeling: If you lowered your average merge time to 1.5h, you would save ~${savingsText}/yr in engineering overhead${throughputText}.`;
html += '
';
return html;
}
@@ -1674,7 +1676,9 @@ Why calculate PR costs?
// Total
let totalHours = b.author.total_hours + b.delay_cost_detail.total_delay_hours;
if (b.participants) {
- b.participants.forEach(p => totalHours += p.total_hours);
+ for (const p of b.participants) {
+ totalHours += p.total_hours;
+ }
}
output += ' ═══════════════════════════════════════════════════════════════\n';
output += ` Total ${formatCurrency(b.total_cost).padStart(12)} ${formatTimeUnit(totalHours)}\n\n`;
@@ -1934,7 +1938,7 @@ Why calculate PR costs?
return output;
}
- function formatExtrapolatedTotal(e, days) {
+ function formatExtrapolatedTotal(e, _days) {
let output = '';
// Calculate LOC for header and lines
@@ -1979,13 +1983,13 @@ Why calculate PR costs?
}
// Delay Costs
- let delayCostsHeader = ' Delay Costs (human PRs avg ' + formatTimeUnit(e.avg_human_pr_duration_hours || 0) + ' open';
+ let delayCostsHeader = ` Delay Costs (human PRs avg ${formatTimeUnit(e.avg_human_pr_duration_hours || 0)} open`;
if ((e.bot_prs || 0) > 0) {
- delayCostsHeader += ', bot PRs avg ' + formatTimeUnit(e.avg_bot_pr_duration_hours || 0);
+ delayCostsHeader += `, bot PRs avg ${formatTimeUnit(e.avg_bot_pr_duration_hours || 0)}`;
}
delayCostsHeader += ')';
- output += delayCostsHeader + '\n';
- output += ' ' + '─'.repeat(delayCostsHeader.length - 2) + '\n';
+ output += `${delayCostsHeader}\n`;
+ output += ` ${'─'.repeat(delayCostsHeader.length - 2)}\n`;
if ((e.delivery_delay_cost || 0) > 0) {
output += formatItemLine("Workstream blockage", e.delivery_delay_cost, formatTimeUnit(e.delivery_delay_hours), `(${e.human_prs || 0} PRs)`);
@@ -2134,11 +2138,11 @@ Why calculate PR costs?
const sampleSize = document.getElementById('repoSampleSize').value;
const days = document.getElementById('repoDays').value;
if (sampleSize) {
- request.sample_size = parseInt(sampleSize);
+ request.sample_size = parseInt(sampleSize, 10);
queryParams.set('sample', sampleSize);
}
if (days) {
- request.days = parseInt(days);
+ request.days = parseInt(days, 10);
queryParams.set('days', days);
}
} else {
@@ -2151,11 +2155,11 @@ Why calculate PR costs?
const sampleSize = document.getElementById('orgSampleSize').value;
const days = document.getElementById('orgDays').value;
if (sampleSize) {
- request.sample_size = parseInt(sampleSize);
+ request.sample_size = parseInt(sampleSize, 10);
queryParams.set('sample', sampleSize);
}
if (days) {
- request.days = parseInt(days);
+ request.days = parseInt(days, 10);
queryParams.set('days', days);
}
}
@@ -2201,7 +2205,9 @@ Why calculate PR costs?
b.delay_cost_detail.automated_updates_cost + b.delay_cost_detail.pr_tracking_cost;
let totalHours = b.author.total_hours + b.delay_cost_detail.total_delay_hours;
if (b.participants) {
- b.participants.forEach(p => totalHours += p.total_hours);
+ for (const p of b.participants) {
+ totalHours += p.total_hours;
+ }
}
const efficiencyPct = totalHours > 0 ? 100.0 * (totalHours - preventableHours) / totalHours : 100.0;
const { grade, message } = efficiencyGrade(efficiencyPct);
@@ -2216,7 +2222,7 @@ Why calculate PR costs?
html += '
';
html += '
Cost Breakdown
';
- html += '
' + formatBreakdown(data) + '
';
+ html += `
${formatBreakdown(data)}
`;
html += '
';
resultDiv.innerHTML = html;
@@ -2261,7 +2267,7 @@ Why calculate PR costs?
if (isRetryable && attempt < maxRetries) {
// Exponential backoff with jitter: 1s, 2s, 4s, 8s, 16s, 32s, 64s, up to 120s
- const baseDelay = Math.min(1000 * Math.pow(2, attempt - 1), 120000);
+ const baseDelay = Math.min(1000 * 2 ** (attempt - 1), 120000);
// Add jitter: random value between 0% and 25% of base delay
const jitter = Math.random() * 0.25 * baseDelay;
const delay = Math.floor(baseDelay + jitter);
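For reference, the same retry schedule rendered in Go: the base delay doubles from 1s and is capped at 120s, with up to 25% random jitter added. Only the constants mirror the front-end code; the helper name is made up:

```go
package main

import (
	"fmt"
	"math/rand/v2"
	"time"
)

// backoffDelay mirrors the browser-side formula; attempt is 1-based.
func backoffDelay(attempt int) time.Duration {
	base := time.Duration(1<<(attempt-1)) * time.Second // 1s, 2s, 4s, ...
	if base > 120*time.Second {
		base = 120 * time.Second
	}
	jitter := time.Duration(rand.Float64() * 0.25 * float64(base))
	return base + jitter
}

func main() {
	for attempt := 1; attempt <= 8; attempt++ {
		fmt.Printf("attempt %d: %v\n", attempt, backoffDelay(attempt))
	}
}
```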
@@ -2284,14 +2290,15 @@ Why calculate PR costs?
}
}
- async function attemptStreamingRequest(endpoint, request, resultDiv, attempt, maxRetries) {
+ async function attemptStreamingRequest(endpoint, request, resultDiv, _attempt, _maxRetries) {
// EventSource doesn't support POST, so we need a different approach
// We'll use fetch to initiate, but handle it as a proper SSE stream
return new Promise((resolve, reject) => {
- let progressContainer;
+ let _progressContainer;
let lastActivityTime = Date.now();
let timeoutId;
let reader; // Declare reader in outer scope to prevent race condition
+ const submitBtn = document.querySelector('button[type="submit"]');
// Set up activity timeout (10 seconds of no data = connection lost)
// Server sends updates every ~5s, so 10s allows for network latency
@@ -2302,7 +2309,7 @@ Why calculate PR costs?
const elapsed = Date.now() - lastActivityTime;
if (elapsed >= 10000) {
if (reader) {
- reader.cancel().catch(() => {}); // Ignore cancel errors
+ reader.cancel().catch(() => { /* intentionally ignored */ });
}
reject(new Error('Stream timeout: no data received for 10 seconds'));
}
@@ -2354,7 +2361,7 @@ Why calculate PR costs?
if (data.type === 'error' && !data.pr) {
// Global error
if (timeoutId) clearTimeout(timeoutId);
- reader.cancel().catch(() => {}); // Ignore cancel errors
+ reader.cancel().catch(() => { /* intentionally ignored */ });
reject(new Error(data.error));
return;
}
@@ -2363,7 +2370,7 @@ Why calculate PR costs?
// Final result - show both average and extrapolated in separate sections
const mode = document.querySelector('input[name="mode"]:checked').value;
const e = data.result;
- const days = document.getElementById(mode + 'Days').value || '90';
+ const days = document.getElementById(`${mode}Days`).value || '90';
// Get source name
let sourceName;
@@ -2398,7 +2405,7 @@ Why calculate PR costs?
const extEfficiencyPct = e.total_hours > 0 ? 100.0 * (e.total_hours - extPreventableHours) / e.total_hours : 100.0;
// Use grades computed by backend (single source of truth)
const extEfficiency = { grade: e.efficiency_grade, message: e.efficiency_message };
- const annualMultiplier = 365.0 / parseInt(days);
+ const annualMultiplier = 365.0 / parseInt(days, 10);
const annualWasteHours = extPreventableHours * annualMultiplier;
const annualWasteCost = extPreventableCost * annualMultiplier;
@@ -2456,12 +2463,12 @@ Why calculate PR costs?
const avgDeliveryDelayCost = e.delivery_delay_cost / totalPRs;
const avgCodeChurnHours = e.code_churn_hours / totalPRs;
const avgDeliveryDelayHours = e.delivery_delay_hours / totalPRs;
- const avgTotalCost = e.total_cost / totalPRs;
+ const _avgTotalCost = e.total_cost / totalPRs;
const avgTotalHours = e.total_hours / totalPRs;
const avgPreventableHours = avgCodeChurnHours + avgDeliveryDelayHours + avgCodeChurnHours + (e.automated_updates_hours / totalPRs) + (e.pr_tracking_hours / totalPRs);
- const avgPreventableCost = avgCodeChurnCost + avgDeliveryDelayCost + avgCodeChurnCost + (e.automated_updates_cost / totalPRs) + (e.pr_tracking_cost / totalPRs);
+ const _avgPreventableCost = avgCodeChurnCost + avgDeliveryDelayCost + avgCodeChurnCost + (e.automated_updates_cost / totalPRs) + (e.pr_tracking_cost / totalPRs);
const avgEfficiencyPct = avgTotalHours > 0 ? 100.0 * (avgTotalHours - avgPreventableHours) / avgTotalHours : 100.0;
- const avgEfficiency = efficiencyGrade(avgEfficiencyPct);
+ const _avgEfficiency = efficiencyGrade(avgEfficiencyPct);
// Add workflow timeline if available (only when using turnserver)
if (data.seconds_in_state) {
@@ -2477,13 +2484,13 @@ Why calculate PR costs?
// Extrapolated total section
html += '
';
html += `
${days}-day Estimated Costs
`;
- html += '
' + formatExtrapolatedTotal(e, days) + '
';
+ html += `
${formatExtrapolatedTotal(e, days)}
`;
html += '
';
// Average PR section
html += '
';
html += `
Average PR (sampled over ${days} day period)
`;
- html += '
' + formatAveragePR(e) + '
';
+ html += `
${formatAveragePR(e)}
`;
html += '
';
resultDiv.innerHTML = html;
diff --git a/pkg/cost/analyze.go b/pkg/cost/analyze.go
index a807b37..a02820c 100644
--- a/pkg/cost/analyze.go
+++ b/pkg/cost/analyze.go
@@ -75,21 +75,21 @@ func AnalyzePRs(ctx context.Context, req *AnalysisRequest) (*AnalysisResult, err
// Sequential processing
if concurrency == 1 {
- for i, pr := range req.Samples {
- prURL := fmt.Sprintf("https://github.com/%s/%s/pull/%d", pr.Owner, pr.Repo, pr.Number)
+ for i := range req.Samples {
+ prURL := fmt.Sprintf("https://github.com/%s/%s/pull/%d", req.Samples[i].Owner, req.Samples[i].Repo, req.Samples[i].Number)
if req.Logger != nil {
req.Logger.InfoContext(ctx, "Processing sample PR",
- "repo", fmt.Sprintf("%s/%s", pr.Owner, pr.Repo),
- "number", pr.Number,
+ "repo", fmt.Sprintf("%s/%s", req.Samples[i].Owner, req.Samples[i].Repo),
+ "number", req.Samples[i].Number,
"progress", fmt.Sprintf("%d/%d", i+1, len(req.Samples)))
}
- prData, err := req.Fetcher.FetchPRData(ctx, prURL, pr.UpdatedAt)
+ prData, err := req.Fetcher.FetchPRData(ctx, prURL, req.Samples[i].UpdatedAt)
if err != nil {
if req.Logger != nil {
req.Logger.WarnContext(ctx, "Failed to fetch PR data, skipping",
- "pr_number", pr.Number, "error", err)
+ "pr_number", req.Samples[i].Number, "error", err)
}
skipped++
continue
@@ -103,11 +103,10 @@ func AnalyzePRs(ctx context.Context, req *AnalysisRequest) (*AnalysisResult, err
var wg sync.WaitGroup
semaphore := make(chan struct{}, concurrency)
- for i, pr := range req.Samples {
- wg.Add(1)
- go func(index int, prInfo PRSummaryInfo) {
- defer wg.Done()
-
+ for i := range req.Samples {
+ index := i
+ prInfo := req.Samples[i]
+ wg.Go(func() {
// Acquire semaphore slot
semaphore <- struct{}{}
defer func() { <-semaphore }()
@@ -137,7 +136,7 @@ func AnalyzePRs(ctx context.Context, req *AnalysisRequest) (*AnalysisResult, err
mu.Lock()
breakdowns = append(breakdowns, breakdown)
mu.Unlock()
- }(i, pr)
+ })
}
wg.Wait()
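A compact illustration of the bounded-concurrency shape used above: WaitGroup.Go (available since Go 1.25) launches each worker, a buffered channel acts as the semaphore limiting how many run at once, and a mutex guards the shared results slice. Everything here is illustrative scaffolding rather than the package's real worker:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	items := []int{1, 2, 3, 4, 5, 6, 7, 8}
	const concurrency = 3

	var (
		wg        sync.WaitGroup
		mu        sync.Mutex
		results   []int
		semaphore = make(chan struct{}, concurrency)
	)

	for i := range items {
		item := items[i]
		wg.Go(func() {
			semaphore <- struct{}{}        // acquire a slot
			defer func() { <-semaphore }() // release it when done

			out := item * item // stand-in for the real per-PR work

			mu.Lock()
			results = append(results, out)
			mu.Unlock()
		})
	}
	wg.Wait()
	fmt.Println(len(results), "items processed")
}
```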
diff --git a/pkg/cost/cost.go b/pkg/cost/cost.go
index bf6c8a0..ad012ca 100644
--- a/pkg/cost/cost.go
+++ b/pkg/cost/cost.go
@@ -272,18 +272,11 @@ func Calculate(data PRData, cfg Config) Breakdown {
delayDays := delayHours / 24.0
// Find the last event timestamp to determine time since last activity
- var lastEventTime time.Time
+ lastEventTime := data.CreatedAt
if len(data.Events) > 0 {
- // Find the most recent event
- lastEventTime = data.Events[0].Timestamp
- for _, event := range data.Events {
- if event.Timestamp.After(lastEventTime) {
- lastEventTime = event.Timestamp
- }
- }
- } else {
- // No events, use CreatedAt
- lastEventTime = data.CreatedAt
+ lastEventTime = slices.MaxFunc(data.Events, func(a, b ParticipantEvent) int {
+ return a.Timestamp.Compare(b.Timestamp)
+ }).Timestamp
}
// Calculate time since last event (using endTime)
@@ -676,13 +669,9 @@ func calculateParticipantCosts(data PRData, cfg Config, hourlyRate float64) []Pa
for actor, events := range eventsByActor {
// Check if this person is a reviewer (has review or review_comment events)
- isReviewer := false
- for _, event := range events {
- if event.Kind == "review" || event.Kind == "review_comment" {
- isReviewer = true
- break
- }
- }
+ isReviewer := slices.ContainsFunc(events, func(e ParticipantEvent) bool {
+ return e.Kind == "review" || e.Kind == "review_comment"
+ })
// Calculate review cost (LOC-based, once per reviewer)
var reviewHours float64
@@ -827,14 +816,14 @@ func calculateSessionCosts(events []ParticipantEvent, cfg Config) (githubHours,
// Between sessions: context out + context in, capped by gap
for i := range len(sessionGroups) - 1 {
- lastEventOfSession := sorted[sessionGroups[i].end].Timestamp
- firstEventOfNextSession := sorted[sessionGroups[i+1].start].Timestamp
- gap := firstEventOfNextSession.Sub(lastEventOfSession)
+ prev := sorted[sessionGroups[i].end].Timestamp
+ next := sorted[sessionGroups[i+1].start].Timestamp
+ gap := next.Sub(prev)
// Maximum context switch is contextOut + contextIn
- maxContextSwitch := contextOut + contextIn
- if gap >= maxContextSwitch {
- contextTime += maxContextSwitch
+ maxSwitch := contextOut + contextIn
+ if gap >= maxSwitch {
+ contextTime += maxSwitch
} else {
// Cap at gap - split proportionally based on out/in ratio
// This maintains the asymmetry (16.55 min out vs 3 min in)
@@ -845,9 +834,5 @@ func calculateSessionCosts(events []ParticipantEvent, cfg Config) (githubHours,
// Last session: context out
contextTime += contextOut
- githubHours = githubTime.Hours()
- contextHours = contextTime.Hours()
- sessionCount := len(sessionGroups)
-
- return githubHours, contextHours, sessionCount
+ return githubTime.Hours(), contextTime.Hours(), len(sessionGroups)
}
diff --git a/pkg/cost/cost_test.go b/pkg/cost/cost_test.go
index 1621a25..2d3de6d 100644
--- a/pkg/cost/cost_test.go
+++ b/pkg/cost/cost_test.go
@@ -443,16 +443,7 @@ func TestCalculateWithRealPR13(t *testing.T) {
t.Skipf("Skipping real PR test: %v", err)
}
- // Extract JSON from the last line (prx outputs logs then JSON)
- lines := strings.Split(string(data), "\n")
- var jsonLine string
- for i := len(lines) - 1; i >= 0; i-- {
- if strings.HasPrefix(lines[i], "{") {
- jsonLine = lines[i]
- break
- }
- }
-
+ // Parse JSON (may be multi-line or single-line)
var prxData struct {
Events []struct {
Timestamp string `json:"timestamp"`
@@ -468,7 +459,7 @@ func TestCalculateWithRealPR13(t *testing.T) {
} `json:"pull_request"`
}
- if err := json.Unmarshal([]byte(jsonLine), &prxData); err != nil {
+ if err := json.Unmarshal(data, &prxData); err != nil {
t.Fatalf("Failed to parse PR data: %v", err)
}
diff --git a/pkg/cost/extrapolate.go b/pkg/cost/extrapolate.go
index c4caaf4..6f624fa 100644
--- a/pkg/cost/extrapolate.go
+++ b/pkg/cost/extrapolate.go
@@ -191,7 +191,7 @@ type ExtrapolatedBreakdown struct {
// The function computes the average cost per PR from the samples, then multiplies
// by the total PR count to estimate population-wide costs.
//
-//nolint:revive,maintidx // Complex calculation function benefits from cohesion
+//nolint:revive,maintidx,gocognit // Complex calculation function benefits from cohesion
func ExtrapolateFromSamples(breakdowns []Breakdown, totalPRs, totalAuthors, actualOpenPRs int, daysInPeriod int, cfg Config, prs []PRSummaryInfo, repoVisibility map[string]bool) ExtrapolatedBreakdown {
// Count unique repositories and their visibility
uniqueRepos := make(map[string]bool)
diff --git a/pkg/github/fetch.go b/pkg/github/fetch.go
index a1e8aea..7931217 100644
--- a/pkg/github/fetch.go
+++ b/pkg/github/fetch.go
@@ -88,7 +88,6 @@ func PRDataFromPRX(prData *prx.PullRequestData) cost.PRData {
// Returns:
// - cost.PRData with all information needed for cost calculation
func FetchPRData(ctx context.Context, prURL string, token string, updatedAt time.Time) (cost.PRData, error) {
- // Parse the PR URL to extract owner, repo, and PR number
owner, repo, number, err := parsePRURL(prURL)
if err != nil {
slog.Error("Failed to parse PR URL", "url", prURL, "error", err)
@@ -97,46 +96,33 @@ func FetchPRData(ctx context.Context, prURL string, token string, updatedAt time
slog.Debug("Parsed PR URL", "owner", owner, "repo", repo, "number", number)
- // Get cache directory from user's cache directory
- userCacheDir, err := os.UserCacheDir()
- if err != nil {
- slog.Warn("Failed to get cache directory, using non-cached client", "error", err)
- // Fallback to non-cached client
- client := prx.NewClient(token)
- prData, err := client.PullRequest(ctx, owner, repo, number)
- if err != nil {
- slog.Error("GitHub API call failed", "owner", owner, "repo", repo, "pr", number, "error", err)
- return cost.PRData{}, fmt.Errorf("failed to fetch PR data: %w", err)
+ // Try to use cache client if possible, fall back to non-cached client
+ var prData *prx.PullRequestData
+ cacheDir := ""
+ if userCacheDir, err := os.UserCacheDir(); err == nil {
+ cacheDir = filepath.Join(userCacheDir, "prcost")
+ if err := os.MkdirAll(cacheDir, 0o700); err != nil {
+ slog.Warn("Failed to create cache directory, using non-cached client", "error", err)
+ cacheDir = ""
}
- result := PRDataFromPRX(prData)
- return result, nil
+ } else {
+ slog.Warn("Failed to get cache directory, using non-cached client", "error", err)
}
- cacheDir := filepath.Join(userCacheDir, "prcost")
- if err := os.MkdirAll(cacheDir, 0o700); err != nil {
- slog.Warn("Failed to create cache directory, using non-cached client", "error", err)
- // Fallback to non-cached client
- client := prx.NewClient(token)
- prData, err := client.PullRequest(ctx, owner, repo, number)
- if err != nil {
- slog.Error("GitHub API call failed", "owner", owner, "repo", repo, "pr", number, "error", err)
- return cost.PRData{}, fmt.Errorf("failed to fetch PR data: %w", err)
+ if cacheDir != "" {
+ client, cErr := prx.NewCacheClient(token, cacheDir)
+ if cErr != nil {
+ slog.Error("Failed to create cache client", "error", cErr)
+ return cost.PRData{}, fmt.Errorf("failed to create cache client: %w", cErr)
}
- result := PRDataFromPRX(prData)
- return result, nil
- }
-
- // Create prx cache client for disk-based caching
- client, err := prx.NewCacheClient(token, cacheDir)
- if err != nil {
- slog.Error("Failed to create cache client", "error", err)
- return cost.PRData{}, fmt.Errorf("failed to create cache client: %w", err)
+ slog.Debug("Calling GitHub API via prx cache client",
+ "owner", owner, "repo", repo, "pr", number, "updated_at", updatedAt.Format(time.RFC3339))
+ prData, err = client.PullRequest(ctx, owner, repo, number, updatedAt)
+ } else {
+ client := prx.NewClient(token)
+ prData, err = client.PullRequest(ctx, owner, repo, number)
}
- // Fetch PR data using prx (prx has built-in retry logic and caching)
- // Pass updatedAt for effective cache validation
- slog.Debug("Calling GitHub API via prx cache client", "owner", owner, "repo", repo, "pr", number, "updated_at", updatedAt.Format(time.RFC3339))
- prData, err := client.PullRequest(ctx, owner, repo, number, updatedAt)
if err != nil {
slog.Error("GitHub API call failed", "owner", owner, "repo", repo, "pr", number, "error", err)
return cost.PRData{}, fmt.Errorf("failed to fetch PR data: %w", err)
@@ -148,7 +134,6 @@ func FetchPRData(ctx context.Context, prURL string, token string, updatedAt time
"author", prData.PullRequest.Author,
"total_events", len(prData.Events))
- // Convert to cost.PRData
result := PRDataFromPRX(prData)
slog.Debug("Converted PR data", "human_events", len(result.Events))
return result, nil
diff --git a/pkg/github/query.go b/pkg/github/query.go
index 164efcc..39b1853 100644
--- a/pkg/github/query.go
+++ b/pkg/github/query.go
@@ -3,15 +3,48 @@ package github
import (
"bytes"
"context"
+ "crypto/sha256"
+ "encoding/hex"
"encoding/json"
"fmt"
"log/slog"
"net/http"
"sort"
+ "strconv"
"strings"
"time"
)
+// Cache is a simple interface for caching any type of data.
+// The server provides this via fido, with proper TTLs for each data type.
+type Cache interface {
+ Get(ctx context.Context, key string) (any, bool)
+ Set(ctx context.Context, key string, value any)
+}
+
+// Client provides cached access to GitHub GraphQL APIs.
+type Client struct {
+ cache Cache
+}
+
+// NewClient creates a GitHub client with caching.
+// If cache is nil, caching is disabled (queries will always hit GitHub).
+func NewClient(cache Cache) *Client {
+ return &Client{cache: cache}
+}
+
+// noopCache implements Cache but never caches anything.
+type noopCache struct{}
+
+func (noopCache) Get(context.Context, string) (any, bool) { return nil, false }
+func (noopCache) Set(context.Context, string, any) {}
+
+// NewClientWithoutCache creates a GitHub client that never caches.
+// Use this for CLI tools or one-off queries.
+func NewClientWithoutCache() *Client {
+ return &Client{cache: noopCache{}}
+}
+
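Any two-method implementation satisfies the Cache interface above. A minimal map-backed example — no TTLs, eviction, or persistence, whereas the comment above notes the server supplies these via fido — showing how a caller constructs the cached and uncached clients:

```go
package main

import (
	"context"
	"sync"

	"github.com/codeGROOVE-dev/prcost/pkg/github"
)

// memCache is a toy in-process Cache implementation for illustration.
type memCache struct {
	mu sync.RWMutex
	m  map[string]any
}

func (c *memCache) Get(_ context.Context, key string) (any, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	v, ok := c.m[key]
	return v, ok
}

func (c *memCache) Set(_ context.Context, key string, value any) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.m[key] = value
}

func main() {
	cached := github.NewClient(&memCache{m: make(map[string]any)})
	uncached := github.NewClientWithoutCache() // e.g. for one-off CLI queries
	_, _ = cached, uncached
}
```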
// PRSummary holds minimal information about a PR for sampling and fetching.
type PRSummary struct {
UpdatedAt time.Time
@@ -73,7 +106,40 @@ func IsBot(authorType, authorLogin string) bool {
return false
}
-// FetchPRsFromRepo queries GitHub GraphQL API for all PRs in a repository
+// QueryHash computes a truncated SHA-256 hash of a GraphQL query string for cache key generation.
+// It ensures that cache keys change whenever the query structure changes.
+func QueryHash(query string) string {
+ hash := sha256.Sum256([]byte(query))
+ return hex.EncodeToString(hash[:8]) // Use first 8 bytes (16 hex chars) for brevity
+}
+
+// CacheKey generates a cache key from a GraphQL query template and its parameters.
+// The key includes a hash of the query text so cache is invalidated when the query changes.
+//
+// Parameters:
+// - queryTemplate: The GraphQL query text (can include %s placeholders before fmt.Sprintf)
+// - params: Key-value pairs describing the query parameters (e.g., "org", "myorg", "days", "30")
+//
+// Returns: A cache key like "org=myorg:days=30:qh=a1b2c3d4".
+func CacheKey(queryTemplate string, params ...string) string {
+ if len(params)%2 != 0 {
+ panic("CacheKey requires even number of params (key-value pairs)")
+ }
+
+ // Build parameter portion of cache key
+ var keyParts []string
+ for i := 0; i < len(params); i += 2 {
+ keyParts = append(keyParts, fmt.Sprintf("%s=%v", params[i], params[i+1]))
+ }
+
+ // Add query hash to invalidate cache when query structure changes
+ queryHash := QueryHash(queryTemplate)
+ keyParts = append(keyParts, fmt.Sprintf("qh=%s", queryHash))
+
+ return strings.Join(keyParts, ":")
+}
+
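An illustrative use of CacheKey with a made-up query template, showing the key shape documented above:

```go
package main

import (
	"fmt"

	"github.com/codeGROOVE-dev/prcost/pkg/github"
)

func main() {
	// The template and parameter values are invented for the example.
	const queryTemplate = `query($org: String!) { organization(login: $org) { login } }`

	key := github.CacheKey(queryTemplate, "org", "myorg", "days", "30")
	fmt.Println(key) // e.g. "org=myorg:days=30:qh=<16 hex chars>"
	// Editing the query text changes the qh= suffix, so entries cached
	// under an older query shape are never returned.
}
```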
+// FetchPRsFromRepo queries GitHub GraphQL API (with caching) for all PRs in a repository
// modified since the specified date.
//
// Uses an adaptive multi-query strategy for comprehensive time coverage:
@@ -81,18 +147,58 @@ func IsBot(authorType, authorLogin string) bool {
// 2. If hit limit, query old activity (updated ASC) - get ~500 more
// 3. Check gap between oldest "recent" and newest "old"
// 4. If gap > 1 week, query early period (created ASC) - get ~250 more
-//
-// Parameters:
-// - ctx: Context for the API call
-// - owner: GitHub repository owner
-// - repo: GitHub repository name
-// - since: Only include PRs updated after this time
-// - token: GitHub authentication token
-// - progress: Optional callback for progress updates (can be nil)
-//
-// Returns:
-// - Slice of PRSummary for all matching PRs (deduplicated)
-func FetchPRsFromRepo(ctx context.Context, owner, repo string, since time.Time, token string, progress ProgressCallback) ([]PRSummary, error) {
+func (c *Client) FetchPRsFromRepo(
+ ctx context.Context, owner, repo string, since time.Time, token string, progress ProgressCallback,
+) ([]PRSummary, error) {
+ // Define query template for cache key
+ queryTemplate := `
+ query($owner: String!, $name: String!, $cursor: String) {
+ repository(owner: $owner, name: $name) {
+ pullRequests(first: 100, after: $cursor, orderBy: {field: %s, direction: %s}) {
+ totalCount
+ pageInfo {
+ hasNextPage
+ endCursor
+ }
+ nodes {
+ number
+ createdAt
+ updatedAt
+ closedAt
+ state
+ merged
+ author {
+ login
+ __typename
+ }
+ }
+ }
+ }
+ }`
+
+ days := int(time.Since(since).Hours() / 24)
+ cacheKey := CacheKey(queryTemplate, "owner", owner, "repo", repo, "days", strconv.Itoa(days))
+
+ // Check cache
+ if cached, found := c.cache.Get(ctx, cacheKey); found {
+ if prs, ok := cached.([]PRSummary); ok {
+ return prs, nil
+ }
+ }
+
+ // Cache miss - fetch from GitHub
+ prs, err := fetchPRsFromRepo(ctx, owner, repo, since, token, progress)
+ if err != nil {
+ return nil, err
+ }
+
+ // Store in cache
+ c.cache.Set(ctx, cacheKey, prs)
+ return prs, nil
+}
+
+// fetchPRsFromRepo is the internal implementation.
+func fetchPRsFromRepo(ctx context.Context, owner, repo string, since time.Time, token string, progress ProgressCallback) ([]PRSummary, error) {
// Query 1: Recent activity (updated DESC) - get up to 1000 PRs
recent, hitLimit, err := fetchPRsFromRepoWithSort(ctx, repoSortParams{
owner: owner, repo: repo, since: since, token: token,
@@ -172,14 +278,19 @@ type repoSortParams struct {
}
// fetchPRsFromRepoWithSort queries GitHub GraphQL API with configurable sort order.
-// Returns PRs and a boolean indicating if the API limit (1000) was hit.
-func fetchPRsFromRepoWithSort(ctx context.Context, params repoSortParams) ([]PRSummary, bool, error) {
+// Returns PRs and a boolean indicating whether the API limit (1000) was hit.
+func fetchPRsFromRepoWithSort(
+ ctx context.Context, params repoSortParams,
+) (prs []PRSummary, hitLimit bool, err error) {
owner, repo := params.owner, params.repo
since, token := params.since, params.token
field, direction := params.field, params.direction
maxPRs, queryName := params.maxPRs, params.queryName
progress := params.progress
- query := fmt.Sprintf(`
+
+	// Build the GraphQL query. The %s placeholders are filled with the sort field and direction,
+	// which vary per call; the cache key is derived from the base template (not the filled-in query)
+	// so that structural changes such as adding __typename invalidate cached results.
+ queryTemplate := `
query($owner: String!, $name: String!, $cursor: String) {
repository(owner: $owner, name: $name) {
pullRequests(first: 100, after: $cursor, orderBy: {field: %s, direction: %s}) {
@@ -202,12 +313,13 @@ func fetchPRsFromRepoWithSort(ctx context.Context, params repoSortParams) ([]PRS
}
}
}
- }`, field, direction)
+ }`
+ query := fmt.Sprintf(queryTemplate, field, direction)
var allPRs []PRSummary
var cursor *string
pageNum := 0
- hitLimit := false
+ hitLimit = false
for {
pageNum++
@@ -379,7 +491,7 @@ func deduplicatePRs(prs []PRSummary) []PRSummary {
return unique
}
-// FetchPRsFromOrg queries GitHub GraphQL Search API for all PRs across
+// FetchPRsFromOrg queries GitHub GraphQL Search API (with caching) for all PRs across
// an organization modified since the specified date.
//
// Uses an adaptive multi-query strategy for comprehensive time coverage:
@@ -387,17 +499,62 @@ func deduplicatePRs(prs []PRSummary) []PRSummary {
// 2. If hit limit, query old activity (updated asc) - get ~500 more
// 3. Check gap between oldest "recent" and newest "old"
// 4. If gap > 1 week, query early period (created asc) - get ~250 more
-//
-// Parameters:
-// - ctx: Context for the API call
-// - org: GitHub organization name
-// - since: Only include PRs updated after this time
-// - token: GitHub authentication token
-// - progress: Optional callback for progress updates (can be nil)
-//
-// Returns:
-// - Slice of PRSummary for all matching PRs (deduplicated)
-func FetchPRsFromOrg(ctx context.Context, org string, since time.Time, token string, progress ProgressCallback) ([]PRSummary, error) {
+func (c *Client) FetchPRsFromOrg(ctx context.Context, org string, since time.Time, token string, progress ProgressCallback) ([]PRSummary, error) {
+ // Define query template for cache key
+ queryTemplate := `
+ query($searchQuery: String!, $cursor: String) {
+ search(query: $searchQuery, type: ISSUE, first: 100, after: $cursor) {
+ issueCount
+ pageInfo {
+ hasNextPage
+ endCursor
+ }
+ nodes {
+ ... on PullRequest {
+ number
+ createdAt
+ updatedAt
+ closedAt
+ state
+ merged
+ author {
+ login
+ __typename
+ }
+ repository {
+ owner {
+ login
+ }
+ name
+ }
+ }
+ }
+ }
+ }`
+
+ days := int(time.Since(since).Hours() / 24)
+ cacheKey := CacheKey(queryTemplate, "org", org, "days", strconv.Itoa(days))
+
+ // Check cache
+ if cached, found := c.cache.Get(ctx, cacheKey); found {
+ if prs, ok := cached.([]PRSummary); ok {
+ return prs, nil
+ }
+ }
+
+ // Cache miss - fetch from GitHub
+ prs, err := fetchPRsFromOrg(ctx, org, since, token, progress)
+ if err != nil {
+ return nil, err
+ }
+
+ // Store in cache
+ c.cache.Set(ctx, cacheKey, prs)
+ return prs, nil
+}
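
CacheKey is called above but not defined in this diff. A hypothetical sketch of what such a helper might look like, assuming the intent is that the query template participates in the key; the real implementation may differ:

package github // hypothetical package name

import (
	"crypto/sha256"
	"encoding/hex"
)

// CacheKey as it might be implemented: hash the query template plus the labelled
// parameters into a stable string. Because the template is part of the key, editing
// the query (for example adding __typename) invalidates previously cached entries.
func CacheKey(queryTemplate string, parts ...string) string {
	h := sha256.New()
	h.Write([]byte(queryTemplate))
	for _, p := range parts {
		h.Write([]byte{0}) // separator so ("ab","c") and ("a","bc") hash differently
		h.Write([]byte(p))
	}
	return hex.EncodeToString(h.Sum(nil))
}

Note that the wrappers also fold a coarse "days" value into the key rather than the raw since timestamp, so repeated calls within the same day share one cache entry.
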
+
+// fetchPRsFromOrg is the internal implementation.
+func fetchPRsFromOrg(ctx context.Context, org string, since time.Time, token string, progress ProgressCallback) ([]PRSummary, error) {
sinceStr := since.Format("2006-01-02")
// Query 1: Recent activity (updated desc) - get up to 1000 PRs
@@ -482,8 +639,10 @@ type orgSortParams struct {
}
// fetchPRsFromOrgWithSort queries GitHub Search API with configurable sort order.
-// Returns PRs and a boolean indicating if the API limit (1000) was hit.
-func fetchPRsFromOrgWithSort(ctx context.Context, params orgSortParams) ([]PRSummary, bool, error) {
+// Returns the fetched PRs and a boolean indicating whether the API limit (1000) was hit.
+func fetchPRsFromOrgWithSort(
+ ctx context.Context, params orgSortParams,
+) (prs []PRSummary, hitLimit bool, err error) {
org, sinceStr := params.org, params.sinceStr
token := params.token
field, direction := params.field, params.direction
@@ -493,7 +652,7 @@ func fetchPRsFromOrgWithSort(ctx context.Context, params orgSortParams) ([]PRSum
// Query format: org:myorg is:pr updated:>2025-07-25 sort:updated-desc
searchQuery := fmt.Sprintf("org:%s is:pr %s:>%s sort:%s-%s", org, field, sinceStr, field, direction)
- const query = `
+ const queryTemplate = `
query($searchQuery: String!, $cursor: String) {
search(query: $searchQuery, type: ISSUE, first: 100, after: $cursor) {
issueCount
@@ -527,7 +686,7 @@ func fetchPRsFromOrgWithSort(ctx context.Context, params orgSortParams) ([]PRSum
var allPRs []PRSummary
var cursor *string
pageNum := 0
- hitLimit := false
+ hitLimit = false
for {
pageNum++
@@ -540,7 +699,7 @@ func fetchPRsFromOrgWithSort(ctx context.Context, params orgSortParams) ([]PRSum
}
requestBody := map[string]any{
- "query": query,
+ "query": queryTemplate,
"variables": variables,
}
@@ -627,18 +786,18 @@ func fetchPRsFromOrgWithSort(ctx context.Context, params orgSortParams) ([]PRSum
"has_next_page", hasNextPage)
// Collect PRs from this page
- for _, node := range result.Data.Search.Nodes {
+ for i := range result.Data.Search.Nodes {
allPRs = append(allPRs, PRSummary{
- Owner: node.Repository.Owner.Login,
- Repo: node.Repository.Name,
- Number: node.Number,
- Author: node.Author.Login,
- AuthorType: node.Author.TypeName,
- CreatedAt: node.CreatedAt,
- UpdatedAt: node.UpdatedAt,
- ClosedAt: node.ClosedAt,
- State: node.State,
- Merged: node.Merged,
+ Owner: result.Data.Search.Nodes[i].Repository.Owner.Login,
+ Repo: result.Data.Search.Nodes[i].Repository.Name,
+ Number: result.Data.Search.Nodes[i].Number,
+ Author: result.Data.Search.Nodes[i].Author.Login,
+ AuthorType: result.Data.Search.Nodes[i].Author.TypeName,
+ CreatedAt: result.Data.Search.Nodes[i].CreatedAt,
+ UpdatedAt: result.Data.Search.Nodes[i].UpdatedAt,
+ ClosedAt: result.Data.Search.Nodes[i].ClosedAt,
+ State: result.Data.Search.Nodes[i].State,
+ Merged: result.Data.Search.Nodes[i].Merged,
})
// Check if we've hit the maxPRs limit
@@ -677,11 +836,11 @@ func deduplicatePRsByOwnerRepoNumber(prs []PRSummary) []PRSummary {
seen := make(map[key]bool)
var unique []PRSummary
- for _, pr := range prs {
- k := key{owner: pr.Owner, repo: pr.Repo, number: pr.Number}
+ for i := range prs {
+ k := key{owner: prs[i].Owner, repo: prs[i].Repo, number: prs[i].Number}
if !seen[k] {
seen[k] = true
- unique = append(unique, pr)
+ unique = append(unique, prs[i])
}
}
@@ -696,8 +855,8 @@ func deduplicatePRsByOwnerRepoNumber(prs []PRSummary) []PRSummary {
// CountBotPRs counts how many PRs in the list are authored by bots.
func CountBotPRs(prs []PRSummary) int {
count := 0
- for _, pr := range prs {
- if IsBot(pr.AuthorType, pr.Author) {
+ for i := range prs {
+ if IsBot(prs[i].AuthorType, prs[i].Author) {
count++
}
}
@@ -768,11 +927,11 @@ func SamplePRs(prs []PRSummary, sampleSize int) []PRSummary {
}
// Assign PRs to buckets
- for _, pr := range sorted {
+ for j := range sorted {
for i := range buckets {
- if (pr.UpdatedAt.After(buckets[i].startTime) || pr.UpdatedAt.Equal(buckets[i].startTime)) &&
- (pr.UpdatedAt.Before(buckets[i].endTime) || pr.UpdatedAt.Equal(buckets[i].endTime)) {
- buckets[i].prs = append(buckets[i].prs, pr)
+ if (sorted[j].UpdatedAt.After(buckets[i].startTime) || sorted[j].UpdatedAt.Equal(buckets[i].startTime)) &&
+ (sorted[j].UpdatedAt.Before(buckets[i].endTime) || sorted[j].UpdatedAt.Equal(buckets[i].endTime)) {
+ buckets[i].prs = append(buckets[i].prs, sorted[j])
break
}
}
@@ -792,13 +951,13 @@ func SamplePRs(prs []PRSummary, sampleSize int) []PRSummary {
// If some buckets were empty, fill with nearest unused PRs
if len(samples) < sampleSize {
- for _, pr := range sorted {
+ for j := range sorted {
if len(samples) >= sampleSize {
break
}
- if !used[pr.Number] {
- samples = append(samples, pr)
- used[pr.Number] = true
+ if !used[sorted[j].Number] {
+ samples = append(samples, sorted[j])
+ used[sorted[j].Number] = true
}
}
}
@@ -810,9 +969,9 @@ func SamplePRs(prs []PRSummary, sampleSize int) []PRSummary {
// Bot authors are excluded from the count.
func CountUniqueAuthors(prs []PRSummary) int {
uniqueAuthors := make(map[string]bool)
- for _, pr := range prs {
- if !IsBot(pr.AuthorType, pr.Author) {
- uniqueAuthors[pr.Author] = true
+ for i := range prs {
+ if !IsBot(prs[i].AuthorType, prs[i].Author) {
+ uniqueAuthors[prs[i].Author] = true
}
}
return len(uniqueAuthors)
@@ -854,18 +1013,37 @@ func CalculateActualTimeWindow(prs []PRSummary, requestedDays int) (actualDays i
return requestedDays, false
}
-// CountOpenPRsInRepo queries GitHub GraphQL API to get the total count of open PRs in a repository
-// that were created more than 24 hours ago (PRs open <24 hours don't count as tracking overhead yet).
-//
-// Parameters:
-// - ctx: Context for the API call
-// - owner: GitHub repository owner
-// - repo: GitHub repository name
-// - token: GitHub authentication token
-//
-// Returns:
-// - count: Number of open PRs created >24 hours ago
-func CountOpenPRsInRepo(ctx context.Context, owner, repo, token string) (int, error) {
+// CountOpenPRsInRepo queries GitHub GraphQL API (with caching) to get the total count
+// of open PRs in a repository that were created more than 24 hours ago.
+func (c *Client) CountOpenPRsInRepo(ctx context.Context, owner, repo, token string) (int, error) {
+ queryTemplate := `query($searchQuery: String!) {
+ search(query: $searchQuery, type: ISSUE, first: 0) {
+ issueCount
+ }
+ }`
+
+ cacheKey := CacheKey(queryTemplate, "owner", owner, "repo", repo)
+
+ // Check cache
+ if cached, found := c.cache.Get(ctx, cacheKey); found {
+ if count, ok := cached.(int); ok {
+ return count, nil
+ }
+ }
+
+ // Cache miss - fetch from GitHub
+ count, err := countOpenPRsInRepo(ctx, owner, repo, token)
+ if err != nil {
+ return 0, err
+ }
+
+ // Store in cache
+ c.cache.Set(ctx, cacheKey, count)
+ return count, nil
+}
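
A hedged usage sketch for the cached method above; how the Client is constructed is not part of this diff, and the owner/repo values are placeholders:

package github // hypothetical package name

import (
	"context"
	"fmt"
)

// exampleOpenPRCount is an illustrative caller. The first call reaches the GitHub
// API; the second is served from the cache (until whatever expiry the cache applies).
func exampleOpenPRCount(ctx context.Context, c *Client, token string) error {
	count, err := c.CountOpenPRsInRepo(ctx, "chainguard-dev", "apko", token)
	if err != nil {
		return err
	}
	again, err := c.CountOpenPRsInRepo(ctx, "chainguard-dev", "apko", token) // cache hit
	if err != nil {
		return err
	}
	fmt.Println(count, again)
	return nil
}
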
+
+// countOpenPRsInRepo is the unexported internal implementation.
+func countOpenPRsInRepo(ctx context.Context, owner, repo, token string) (int, error) {
// Only count PRs created more than 24 hours ago
// Use search API which supports created date filtering
twentyFourHoursAgo := time.Now().Add(-24 * time.Hour).Format("2006-01-02T15:04:05Z")
@@ -944,10 +1122,38 @@ func CountOpenPRsInRepo(ctx context.Context, owner, repo, token string) (int, er
return count, nil
}
-// CountOpenPRsInOrg counts all open PRs across an entire GitHub organization with a single GraphQL query.
-// This is much more efficient than counting PRs repo-by-repo for organizations with many repositories.
-// Only counts PRs created more than 24 hours ago to exclude brand-new PRs.
-func CountOpenPRsInOrg(ctx context.Context, org, token string) (int, error) {
+// CountOpenPRsInOrg queries GitHub GraphQL API (with caching) to count all open PRs
+// across an entire organization. More efficient than counting repo-by-repo.
+// Only counts PRs created more than 24 hours ago.
+func (c *Client) CountOpenPRsInOrg(ctx context.Context, org, token string) (int, error) {
+ queryTemplate := `query($searchQuery: String!) {
+ search(query: $searchQuery, type: ISSUE, first: 0) {
+ issueCount
+ }
+ }`
+
+ cacheKey := CacheKey(queryTemplate, "org", org)
+
+ // Check cache
+ if cached, found := c.cache.Get(ctx, cacheKey); found {
+ if count, ok := cached.(int); ok {
+ return count, nil
+ }
+ }
+
+ // Cache miss - fetch from GitHub
+ count, err := countOpenPRsInOrg(ctx, org, token)
+ if err != nil {
+ return 0, err
+ }
+
+ // Store in cache
+ c.cache.Set(ctx, cacheKey, count)
+ return count, nil
+}
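
The internal countOpenPRsInOrg below only counts PRs created more than 24 hours ago. A sketch, under assumptions, of how the search expression for that filter is likely built; the helper name is hypothetical (the diff builds the string inline) and the exact qualifiers in the real code may differ slightly:

package github // hypothetical package name

import (
	"fmt"
	"time"
)

// openPRSearchQuery selects open PRs in an org created more than 24 hours before now,
// using GitHub search qualifiers.
func openPRSearchQuery(org string, now time.Time) string {
	cutoff := now.Add(-24 * time.Hour).Format("2006-01-02T15:04:05Z")
	return fmt.Sprintf("org:%s is:pr is:open created:<%s", org, cutoff)
}

The resulting string is passed as the $searchQuery variable of the search(query: ..., type: ISSUE, first: 0) call, and issueCount is the returned count.
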
+
+// countOpenPRsInOrg is the internal implementation.
+func countOpenPRsInOrg(ctx context.Context, org, token string) (int, error) {
// Only count PRs created more than 24 hours ago
twentyFourHoursAgo := time.Now().Add(-24 * time.Hour).Format("2006-01-02T15:04:05Z")
@@ -1030,19 +1236,51 @@ type RepoVisibility struct {
IsPrivate bool
}
-// FetchOrgRepositoriesWithActivity fetches all repositories in an organization
-// that had activity (pushes) in the specified time period, along with their privacy status.
-// This is useful for determining which repositories were analyzed and whether they're public or private.
-//
-// Parameters:
-// - ctx: Context for the API call
-// - org: GitHub organization name
-// - since: Only include repos with activity after this time
-// - token: GitHub authentication token
-//
-// Returns:
-// - Map of repository name to RepoVisibility struct
-func FetchOrgRepositoriesWithActivity(ctx context.Context, org string, since time.Time, token string) (map[string]RepoVisibility, error) {
+// FetchOrgRepositoriesWithActivity queries GitHub GraphQL API (with caching) to fetch
+// all repositories in an organization that had activity in the specified time period,
+// along with their privacy status (public/private).
+func (c *Client) FetchOrgRepositoriesWithActivity(ctx context.Context, org string, since time.Time, token string) (map[string]RepoVisibility, error) {
+ queryTemplate := `
+ query($org: String!, $cursor: String) {
+ organization(login: $org) {
+ repositories(first: 100, after: $cursor, orderBy: {field: PUSHED_AT, direction: DESC}) {
+ pageInfo {
+ hasNextPage
+ endCursor
+ }
+ nodes {
+ name
+ isPrivate
+ pushedAt
+ }
+ }
+ }
+ }
+ `
+
+ days := int(time.Since(since).Hours() / 24)
+ cacheKey := CacheKey(queryTemplate, "org", org, "days", strconv.Itoa(days))
+
+ // Check cache
+ if cached, found := c.cache.Get(ctx, cacheKey); found {
+ if repoVis, ok := cached.(map[string]RepoVisibility); ok {
+ return repoVis, nil
+ }
+ }
+
+ // Cache miss - fetch from GitHub
+ repoVis, err := fetchOrgRepositoriesWithActivity(ctx, org, since, token)
+ if err != nil {
+ return nil, err
+ }
+
+ // Store in cache
+ c.cache.Set(ctx, cacheKey, repoVis)
+ return repoVis, nil
+}
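
A sketch, under assumptions, of the cursor-pagination loop the internal implementation below presumably runs over the repositories query: follow endCursor while hasNextPage, and stop early once a repository's pushedAt predates since, which is safe only because the query orders by PUSHED_AT descending. repoPage and doFetch are illustrative names, not identifiers from this codebase; the sketch assumes it lives in the same package as RepoVisibility.

package github // hypothetical package name

import (
	"context"
	"time"
)

// repoPage is an illustrative stand-in for one page of the repositories query above.
type repoPage struct {
	HasNextPage bool
	EndCursor   string
	Nodes       []struct {
		Name      string
		IsPrivate bool
		PushedAt  time.Time
	}
}

// collectActiveRepos walks pages returned by doFetch (one GraphQL round trip per call)
// and stops as soon as a repository's pushedAt falls before since.
func collectActiveRepos(ctx context.Context, since time.Time,
	doFetch func(ctx context.Context, cursor *string) (repoPage, error),
) (map[string]RepoVisibility, error) {
	out := make(map[string]RepoVisibility)
	var cursor *string
	for {
		page, err := doFetch(ctx, cursor)
		if err != nil {
			return nil, err
		}
		for _, n := range page.Nodes {
			if n.PushedAt.Before(since) {
				return out, nil // every later node is older still
			}
			out[n.Name] = RepoVisibility{IsPrivate: n.IsPrivate}
		}
		if !page.HasNextPage {
			return out, nil
		}
		cursor = &page.EndCursor
	}
}
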
+
+// fetchOrgRepositoriesWithActivity is the internal implementation.
+func fetchOrgRepositoriesWithActivity(ctx context.Context, org string, since time.Time, token string) (map[string]RepoVisibility, error) {
query := `
query($org: String!, $cursor: String) {
organization(login: $org) {
diff --git a/testdata/pr_13.json b/testdata/pr_13.json
index f4bf2d5..99bdea9 100644
--- a/testdata/pr_13.json
+++ b/testdata/pr_13.json
@@ -1,29 +1,93 @@
-2025/10/22 16:22:52 INFO cache miss: GraphQL pull request expired owner=machine-drivers repo=docker-machine-driver-hyperkit pr=13 cached_at=2025-10-22T16:12:58.375+02:00 reference_time=2025-10-22T16:22:52.168+02:00
-2025/10/22 16:22:52 INFO fetching pull request via GraphQL owner=machine-drivers repo=docker-machine-driver-hyperkit pr=13
-2025/10/22 16:22:52 INFO HTTP request starting method=POST url=https://api.github.com/graphql host=api.github.com
-2025/10/22 16:22:55 INFO HTTP response received status=200 url=https://api.github.com/graphql elapsed=3.7379905s
-2025/10/22 16:22:55 INFO GraphQL query completed cost=1 remaining=4988 limit=5000
-2025/10/22 16:22:55 INFO GitHub API request starting method=GET url="https://api.github.com/repos/machine-drivers/docker-machine-driver-hyperkit/collaborators?affiliation=all&per_page=100" headers="map[Accept:application/vnd.github.v3+json Authorization:Bearer gho_...UIqb User-Agent:]"
-2025/10/22 16:22:55 INFO HTTP request starting method=GET url="https://api.github.com/repos/machine-drivers/docker-machine-driver-hyperkit/collaborators?affiliation=all&per_page=100" host=api.github.com
-2025/10/22 16:22:56 INFO HTTP response received status=403 url="https://api.github.com/repos/machine-drivers/docker-machine-driver-hyperkit/collaborators?affiliation=all&per_page=100" elapsed=835.236583ms
-2025/10/22 16:22:56 INFO GitHub API response received status="403 Forbidden" url="https://api.github.com/repos/machine-drivers/docker-machine-driver-hyperkit/collaborators?affiliation=all&per_page=100" elapsed=835.304333ms rate_limits="map[Retry-After: X-RateLimit-Limit:5000 X-RateLimit-Remaining:4986 X-RateLimit-Reset:1761143493 X-RateLimit-Resource:collaborators X-RateLimit-Used:14]"
-2025/10/22 16:22:56 ERROR GitHub API error status="403 Forbidden" status_code=403 url="https://api.github.com/repos/machine-drivers/docker-machine-driver-hyperkit/collaborators?affiliation=all&per_page=100" body="{\"message\":\"Must have push access to view repository collaborators.\",\"documentation_url\":\"https://docs.github.com/rest/collaborators/collaborators#list-repository-collaborators\",\"status\":\"403\"}" headers="map[Content-Type:[application/json; charset=utf-8] Date:[Wed, 22 Oct 2025 14:22:57 GMT] Server:[github.com] X-Accepted-Oauth-Scopes:[] X-Content-Type-Options:[nosniff] X-Frame-Options:[deny] X-Github-Api-Version-Selected:[2022-11-28] X-Github-Media-Type:[github.v3; format=json] X-Github-Request-Id:[0574:89405:CC015A:35D6176:68F8E8C1] X-Oauth-Client-Id:[178c6fc778ccc68e1d6a] X-Oauth-Scopes:[admin:public_key, gist, project, read:org, repo] X-Ratelimit-Limit:[5000] X-Ratelimit-Remaining:[4986] X-Ratelimit-Reset:[1761143493] X-Ratelimit-Resource:[collaborators] X-Ratelimit-Used:[14] X-Xss-Protection:[0]]"
-2025/10/22 16:22:56 WARN failed to fetch collaborators for write access check owner=machine-drivers repo=docker-machine-driver-hyperkit user=tstromberg error="github API error: 403 Forbidden"
-2025/10/22 16:22:56 INFO GitHub API request starting method=GET url="https://api.github.com/repos/machine-drivers/docker-machine-driver-hyperkit/collaborators?affiliation=all&per_page=100" headers="map[Accept:application/vnd.github.v3+json Authorization:Bearer gho_...UIqb User-Agent:]"
-2025/10/22 16:22:56 INFO HTTP request starting method=GET url="https://api.github.com/repos/machine-drivers/docker-machine-driver-hyperkit/collaborators?affiliation=all&per_page=100" host=api.github.com
-2025/10/22 16:22:57 INFO HTTP response received status=403 url="https://api.github.com/repos/machine-drivers/docker-machine-driver-hyperkit/collaborators?affiliation=all&per_page=100" elapsed=864.379292ms
-2025/10/22 16:22:57 INFO GitHub API response received status="403 Forbidden" url="https://api.github.com/repos/machine-drivers/docker-machine-driver-hyperkit/collaborators?affiliation=all&per_page=100" elapsed=864.490875ms rate_limits="map[Retry-After: X-RateLimit-Limit:5000 X-RateLimit-Remaining:4985 X-RateLimit-Reset:1761143493 X-RateLimit-Resource:collaborators X-RateLimit-Used:15]"
-2025/10/22 16:22:57 ERROR GitHub API error status="403 Forbidden" status_code=403 url="https://api.github.com/repos/machine-drivers/docker-machine-driver-hyperkit/collaborators?affiliation=all&per_page=100" body="{\"message\":\"Must have push access to view repository collaborators.\",\"documentation_url\":\"https://docs.github.com/rest/collaborators/collaborators#list-repository-collaborators\",\"status\":\"403\"}" headers="map[Content-Type:[application/json; charset=utf-8] Date:[Wed, 22 Oct 2025 14:22:58 GMT] Server:[github.com] X-Accepted-Oauth-Scopes:[] X-Content-Type-Options:[nosniff] X-Frame-Options:[deny] X-Github-Api-Version-Selected:[2022-11-28] X-Github-Media-Type:[github.v3; format=json] X-Github-Request-Id:[0574:89405:CC0464:35D6DB3:68F8E8C1] X-Oauth-Client-Id:[178c6fc778ccc68e1d6a] X-Oauth-Scopes:[admin:public_key, gist, project, read:org, repo] X-Ratelimit-Limit:[5000] X-Ratelimit-Remaining:[4985] X-Ratelimit-Reset:[1761143493] X-Ratelimit-Resource:[collaborators] X-Ratelimit-Used:[15] X-Xss-Protection:[0]]"
-2025/10/22 16:22:57 WARN failed to fetch collaborators for write access check owner=machine-drivers repo=docker-machine-driver-hyperkit user=tstromberg error="github API error: 403 Forbidden"
-2025/10/22 16:22:57 INFO GitHub API request starting method=GET url=https://api.github.com/repos/machine-drivers/docker-machine-driver-hyperkit/rulesets headers="map[Accept:application/vnd.github.v3+json Authorization:Bearer gho_...UIqb User-Agent:]"
-2025/10/22 16:22:57 INFO HTTP request starting method=GET url=https://api.github.com/repos/machine-drivers/docker-machine-driver-hyperkit/rulesets host=api.github.com
-2025/10/22 16:22:58 INFO HTTP response received status=200 url=https://api.github.com/repos/machine-drivers/docker-machine-driver-hyperkit/rulesets elapsed=909.665375ms
-2025/10/22 16:22:58 INFO GitHub API response received status="200 OK" url=https://api.github.com/repos/machine-drivers/docker-machine-driver-hyperkit/rulesets elapsed=909.742417ms rate_limits="map[Retry-After: X-RateLimit-Limit:5000 X-RateLimit-Remaining:4911 X-RateLimit-Reset:1761144274 X-RateLimit-Resource:core X-RateLimit-Used:89]"
-2025/10/22 16:22:58 INFO fetched required checks from rulesets count=0 checks=[]
-2025/10/22 16:22:58 INFO GitHub API request starting method=GET url="https://api.github.com/repos/machine-drivers/docker-machine-driver-hyperkit/commits/7e53423cd01166d6731f244bbb9af75e54c2a955/check-runs?per_page=100" headers="map[Accept:application/vnd.github.v3+json Authorization:Bearer gho_...UIqb User-Agent:]"
-2025/10/22 16:22:58 INFO HTTP request starting method=GET url="https://api.github.com/repos/machine-drivers/docker-machine-driver-hyperkit/commits/7e53423cd01166d6731f244bbb9af75e54c2a955/check-runs?per_page=100" host=api.github.com
-2025/10/22 16:22:59 INFO HTTP response received status=200 url="https://api.github.com/repos/machine-drivers/docker-machine-driver-hyperkit/commits/7e53423cd01166d6731f244bbb9af75e54c2a955/check-runs?per_page=100" elapsed=1.048200458s
-2025/10/22 16:22:59 INFO GitHub API response received status="200 OK" url="https://api.github.com/repos/machine-drivers/docker-machine-driver-hyperkit/commits/7e53423cd01166d6731f244bbb9af75e54c2a955/check-runs?per_page=100" elapsed=1.048283417s rate_limits="map[Retry-After: X-RateLimit-Limit:5000 X-RateLimit-Remaining:4910 X-RateLimit-Reset:1761144274 X-RateLimit-Resource:core X-RateLimit-Used:90]"
-2025/10/22 16:22:59 INFO fetched check runs via REST count=0
-2025/10/22 16:22:59 INFO successfully fetched pull request via hybrid GraphQL+REST owner=machine-drivers repo=docker-machine-driver-hyperkit pr=13 event_count=10 api_calls_made="2 (vs 13+ with REST)"
-{"events":[{"timestamp":"2019-09-11T22:07:29Z","kind":"commit","actor":"sharifelgamal","body":"Moving minikube hyperkit changes upstream"},{"timestamp":"2019-09-11T22:14:46Z","kind":"commit","actor":"sharifelgamal","body":"remove minikube import"},{"timestamp":"2019-09-11T22:15:12Z","kind":"pr_opened","actor":"sharifelgamal","body":"```commit 48d2ace5ccb2ecbd91362d0807ef974532cfff6d\r\nAuthor: Thomas Stromberg \u003ctstromberg@google.com\u003e\r\nDate: Thu Aug 22 11:41:41 2019 -0700\r\n\r\n Remove broken default disk size test that didn't make sense\r\n\r\ncommit a817bffab1752e8956f68a88cf2548f2dc2932","write_access":-1},{"timestamp":"2019-09-11T22:16:36Z","kind":"cross_referenced","actor":"sharifelgamal"},{"timestamp":"2019-09-13T03:33:59Z","kind":"review","actor":"tstromberg","outcome":"approved","body":"Looks good. Could you test and confirm that this driver works for both`docker-machine` and `minikube`?","write_access":1,"question":true},{"timestamp":"2019-09-13T23:24:01Z","kind":"commit","actor":"sharifelgamal","body":"add missing files"},{"timestamp":"2019-09-14T00:04:35Z","kind":"comment","actor":"sharifelgamal","body":"I've verified that it works with `minikube` and `docker-machine` is documented as not working with hyperkit.","write_access":-1},{"timestamp":"2019-09-24T18:40:05Z","kind":"cross_referenced","actor":"dx9"},{"timestamp":"2019-10-02T23:15:42Z","kind":"cross_referenced","actor":"cequencer"},{"timestamp":"2025-07-16T14:55:18Z","kind":"review","actor":"tstromberg","outcome":"approved","write_access":1}],"pull_request":{"created_at":"2019-09-11T22:15:12Z","updated_at":"2025-07-16T14:55:18Z","approval_summary":{"approvals_with_write_access":0,"approvals_with_unknown_access":1,"approvals_without_write_access":0,"changes_requested":0},"check_summary":{"success":{},"failing":{},"pending":{},"cancelled":{},"skipped":{},"stale":{},"neutral":{}},"mergeable":null,"assignees":[],"reviewers":{"tstromberg":"approved"},"participant_access":{"cequencer":0,"dx9":0,"sharifelgamal":0,"tstromberg":1},"mergeable_state":"clean","mergeable_state_description":"PR is ready to merge","author":"sharifelgamal","body":"```commit 48d2ace5ccb2ecbd91362d0807ef974532cfff6d\r\nAuthor: Thomas Stromberg \u003ctstromberg@google.com\u003e\r\nDate: Thu Aug 22 11:41:41 2019 -0700\r\n\r\n Remove broken default disk size test that didn't make sense\r\n\r\ncommit a817bffab1752e8956f68a88cf2548f2dc2932","title":"Moving minikube hyperkit changes upstream","state":"open","head_sha":"7e53423cd01166d6731f244bbb9af75e54c2a955","number":13,"changed_files":11,"deletions":306,"additions":638,"author_write_access":-1,"author_bot":false,"merged":false,"draft":false}}
+{
+ "events": [
+ {
+ "timestamp": "2019-09-11T22:07:29Z",
+ "kind": "commit",
+ "actor": "sharifelgamal",
+ "body": "Moving minikube hyperkit changes upstream"
+ },
+ {
+ "timestamp": "2019-09-11T22:14:46Z",
+ "kind": "commit",
+ "actor": "sharifelgamal",
+ "body": "remove minikube import"
+ },
+ {
+ "timestamp": "2019-09-11T22:15:12Z",
+ "kind": "pr_opened",
+ "actor": "sharifelgamal",
+ "body": "```commit 48d2ace5ccb2ecbd91362d0807ef974532cfff6d\r\nAuthor: Thomas Stromberg \u003ctstromberg@google.com\u003e\r\nDate: Thu Aug 22 11:41:41 2019 -0700\r\n\r\n Remove broken default disk size test that didn't make sense\r\n\r\ncommit a817bffab1752e8956f68a88cf2548f2dc2932",
+ "write_access": -1
+ },
+ { "timestamp": "2019-09-11T22:16:36Z", "kind": "cross_referenced", "actor": "sharifelgamal" },
+ {
+ "timestamp": "2019-09-13T03:33:59Z",
+ "kind": "review",
+ "actor": "tstromberg",
+ "outcome": "approved",
+ "body": "Looks good. Could you test and confirm that this driver works for both`docker-machine` and `minikube`?",
+ "write_access": 1,
+ "question": true
+ },
+ {
+ "timestamp": "2019-09-13T23:24:01Z",
+ "kind": "commit",
+ "actor": "sharifelgamal",
+ "body": "add missing files"
+ },
+ {
+ "timestamp": "2019-09-14T00:04:35Z",
+ "kind": "comment",
+ "actor": "sharifelgamal",
+ "body": "I've verified that it works with `minikube` and `docker-machine` is documented as not working with hyperkit.",
+ "write_access": -1
+ },
+ { "timestamp": "2019-09-24T18:40:05Z", "kind": "cross_referenced", "actor": "dx9" },
+ { "timestamp": "2019-10-02T23:15:42Z", "kind": "cross_referenced", "actor": "cequencer" },
+ {
+ "timestamp": "2025-07-16T14:55:18Z",
+ "kind": "review",
+ "actor": "tstromberg",
+ "outcome": "approved",
+ "write_access": 1
+ }
+ ],
+ "pull_request": {
+ "created_at": "2019-09-11T22:15:12Z",
+ "updated_at": "2025-07-16T14:55:18Z",
+ "approval_summary": {
+ "approvals_with_write_access": 0,
+ "approvals_with_unknown_access": 1,
+ "approvals_without_write_access": 0,
+ "changes_requested": 0
+ },
+ "check_summary": {
+ "success": {},
+ "failing": {},
+ "pending": {},
+ "cancelled": {},
+ "skipped": {},
+ "stale": {},
+ "neutral": {}
+ },
+ "mergeable": null,
+ "assignees": [],
+ "reviewers": { "tstromberg": "approved" },
+ "participant_access": { "cequencer": 0, "dx9": 0, "sharifelgamal": 0, "tstromberg": 1 },
+ "mergeable_state": "clean",
+ "mergeable_state_description": "PR is ready to merge",
+ "author": "sharifelgamal",
+ "body": "```commit 48d2ace5ccb2ecbd91362d0807ef974532cfff6d\r\nAuthor: Thomas Stromberg \u003ctstromberg@google.com\u003e\r\nDate: Thu Aug 22 11:41:41 2019 -0700\r\n\r\n Remove broken default disk size test that didn't make sense\r\n\r\ncommit a817bffab1752e8956f68a88cf2548f2dc2932",
+ "title": "Moving minikube hyperkit changes upstream",
+ "state": "open",
+ "head_sha": "7e53423cd01166d6731f244bbb9af75e54c2a955",
+ "number": 13,
+ "changed_files": 11,
+ "deletions": 306,
+ "additions": 638,
+ "author_write_access": -1,
+ "author_bot": false,
+ "merged": false,
+ "draft": false
+ }
+}
diff --git a/testdata/pr_1753.json b/testdata/pr_1753.json
index 657dd66..7af7f61 100644
--- a/testdata/pr_1753.json
+++ b/testdata/pr_1753.json
@@ -1,17 +1,127 @@
-2025/10/22 16:11:01 INFO cache miss: GraphQL pull request expired owner=chainguard-dev repo=apko pr=1753 cached_at=2025-10-22T13:31:26.303+02:00 reference_time=2025-10-22T16:11:01.371+02:00
-2025/10/22 16:11:01 INFO fetching pull request via GraphQL owner=chainguard-dev repo=apko pr=1753
-2025/10/22 16:11:01 INFO HTTP request starting method=POST url=https://api.github.com/graphql host=api.github.com
-2025/10/22 16:11:05 INFO HTTP response received status=200 url=https://api.github.com/graphql elapsed=4.575621292s
-2025/10/22 16:11:05 INFO GraphQL query completed cost=1 remaining=4990 limit=5000
-2025/10/22 16:11:05 INFO GitHub API request starting method=GET url=https://api.github.com/repos/chainguard-dev/apko/rulesets headers="map[Accept:application/vnd.github.v3+json Authorization:Bearer gho_...UIqb User-Agent:]"
-2025/10/22 16:11:05 INFO HTTP request starting method=GET url=https://api.github.com/repos/chainguard-dev/apko/rulesets host=api.github.com
-2025/10/22 16:11:07 INFO HTTP response received status=200 url=https://api.github.com/repos/chainguard-dev/apko/rulesets elapsed=1.1206925s
-2025/10/22 16:11:07 INFO GitHub API response received status="200 OK" url=https://api.github.com/repos/chainguard-dev/apko/rulesets elapsed=1.120866375s rate_limits="map[Retry-After: X-RateLimit-Limit:5000 X-RateLimit-Remaining:4941 X-RateLimit-Reset:1761144274 X-RateLimit-Resource:core X-RateLimit-Used:59]"
-2025/10/22 16:11:07 INFO fetched required checks from rulesets count=0 checks=[]
-2025/10/22 16:11:07 INFO GitHub API request starting method=GET url="https://api.github.com/repos/chainguard-dev/apko/commits/fa40fc009a9b4af0a4048b1335858ecb0d0f1019/check-runs?per_page=100" headers="map[Accept:application/vnd.github.v3+json Authorization:Bearer gho_...UIqb User-Agent:]"
-2025/10/22 16:11:07 INFO HTTP request starting method=GET url="https://api.github.com/repos/chainguard-dev/apko/commits/fa40fc009a9b4af0a4048b1335858ecb0d0f1019/check-runs?per_page=100" host=api.github.com
-2025/10/22 16:11:08 INFO HTTP response received status=200 url="https://api.github.com/repos/chainguard-dev/apko/commits/fa40fc009a9b4af0a4048b1335858ecb0d0f1019/check-runs?per_page=100" elapsed=998.862959ms
-2025/10/22 16:11:08 INFO GitHub API response received status="200 OK" url="https://api.github.com/repos/chainguard-dev/apko/commits/fa40fc009a9b4af0a4048b1335858ecb0d0f1019/check-runs?per_page=100" elapsed=999.018375ms rate_limits="map[Retry-After: X-RateLimit-Limit:5000 X-RateLimit-Remaining:4940 X-RateLimit-Reset:1761144274 X-RateLimit-Resource:core X-RateLimit-Used:60]"
-2025/10/22 16:11:08 INFO fetched check runs via REST count=4
-2025/10/22 16:11:08 INFO successfully fetched pull request via hybrid GraphQL+REST owner=chainguard-dev repo=apko pr=1753 event_count=11 api_calls_made="3 (vs 13+ with REST)"
-{"events":[{"timestamp":"2025-07-13T21:07:57Z","kind":"pr_opened","actor":"lxea","body":"/bin/s6-svscan has been moved to `/usr/bin/s6-svscan` since Alpine 3.21 which results in errors when trying to run containers using service bundles with any repos after 3.20 \r\n\r\n```\r\nError: crun: executable file `/bin/s6-svscan` not found: No such file or","write_access":-1},{"timestamp":"2025-07-13T21:13:49Z","kind":"head_ref_force_pushed","actor":"lxea"},{"timestamp":"2025-07-13T21:15:21Z","kind":"commit","actor":"lxea","body":"Use /usr/bin/s6-svscan if it exists"},{"timestamp":"2025-07-13T21:15:26Z","kind":"head_ref_force_pushed","actor":"lxea"},{"timestamp":"2025-07-18T00:43:12Z","kind":"review","actor":"tstromberg","outcome":"approved","write_access":-1},{"timestamp":"2025-10-19T22:08:13Z","kind":"commit","actor":"lxea","body":"Merge branch 'chainguard-dev:main' into s6-svscan-edge"},{"timestamp":"2025-10-19T22:08:16Z","kind":"check_run","actor":"github","outcome":"success","body":"Enforce - Commit Signing","description":"Successfully verified commit signature.: | | CLAIM | DESCRIPTION |\n|----|-------------------------|-------------|\n| ✅ | Found Git signature | |\n| ✅ | Validated Git signature | |\n| ❌ | Validated Rekor entry | |\n| ✅ | Allowed by policy | |\n","bot":true},{"timestamp":"2025-10-19T22:08:20Z","kind":"check_run","actor":"github","outcome":"success","body":"StepSecurity Required Checks","description":"StepSecurity Required Checks: Finished StepSecurity Required Checks\n- **Script Injection Check** - Checks for script injection vulnerabilities in the PR\n- **NPM Compromised Packages Check** - Checks for compromised npm package versions in the PR\n- **NPM Package Cooldown Check** - Fails if any package version in the PR was released within the configured cooldown period, helping to avoid brand-new (and potentially unreviewed or malicious) releases\n","bot":true},{"timestamp":"2025-10-19T22:08:23Z","kind":"check_run","actor":"github","outcome":"success","body":"StepSecurity Optional Checks","description":"StepSecurity Optional Checks: Finished StepSecurity Optional Checks\n- **Pwn Request Vulnerabilities Check** - Checks for Pwn Request vulnerabilities in the PR via risky triggers\n","bot":true},{"timestamp":"2025-10-19T22:08:37Z","kind":"check_run","actor":"github","outcome":"skipped","body":"StepSecurity Harden-Runner","description":"No Workflow Runs Found: No new Harden-Runner detections for this pull request.","bot":true},{"timestamp":"2025-10-20T06:25:55Z","kind":"review","actor":"tstromberg","outcome":"approved","body":"Still LGTM, but needs an approver with write access :(","write_access":-1}],"pull_request":{"created_at":"2025-07-13T21:07:57Z","updated_at":"2025-10-20T06:25:55Z","approval_summary":{"approvals_with_write_access":0,"approvals_with_unknown_access":1,"approvals_without_write_access":0,"changes_requested":0},"check_summary":{"success":{"Enforce - Commit Signing":"Successfully verified commit signature.: | | CLAIM | DESCRIPTION |\n|----|-------------------------|-------------|\n| ✅ | Found Git signature | |\n| ✅ | Validated Git signature | |\n| ❌ | Validated Rekor entry | |\n| ✅ | Allowed by policy | |\n","StepSecurity Optional Checks":"StepSecurity Optional Checks: Finished StepSecurity Optional Checks\n- **Pwn Request Vulnerabilities Check** - Checks for Pwn Request vulnerabilities in the PR via risky triggers\n","StepSecurity Required Checks":"StepSecurity Required Checks: Finished StepSecurity Required Checks\n- **Script Injection Check** - Checks for 
script injection vulnerabilities in the PR\n- **NPM Compromised Packages Check** - Checks for compromised npm package versions in the PR\n- **NPM Package Cooldown Check** - Fails if any package version in the PR was released within the configured cooldown period, helping to avoid brand-new (and potentially unreviewed or malicious) releases\n"},"failing":{},"pending":{"CodeQL":"Expected — Waiting for status to be reported","analyze":"Expected — Waiting for status to be reported","build":"Expected — Waiting for status to be reported","lint":"Expected — Waiting for status to be reported","test":"Expected — Waiting for status to be reported"},"cancelled":{},"skipped":{"StepSecurity Harden-Runner":"No Workflow Runs Found: No new Harden-Runner detections for this pull request."},"stale":{},"neutral":{}},"mergeable":false,"assignees":[],"reviewers":{"tstromberg":"approved"},"participant_access":{"lxea":0,"tstromberg":0},"mergeable_state":"blocked","mergeable_state_description":"PR requires approval and has pending status checks","author":"lxea","body":"/bin/s6-svscan has been moved to `/usr/bin/s6-svscan` since Alpine 3.21 which results in errors when trying to run containers using service bundles with any repos after 3.20 \r\n\r\n```\r\nError: crun: executable file `/bin/s6-svscan` not found: No such file or","title":"Use /usr/bin/s6-svscan if it exists","state":"open","test_state":"passing","head_sha":"fa40fc009a9b4af0a4048b1335858ecb0d0f1019","number":1753,"changed_files":1,"deletions":0,"additions":3,"author_write_access":-1,"author_bot":false,"merged":false,"draft":false}}
+{
+ "events": [
+ {
+ "timestamp": "2025-07-13T21:07:57Z",
+ "kind": "pr_opened",
+ "actor": "lxea",
+ "body": "/bin/s6-svscan has been moved to `/usr/bin/s6-svscan` since Alpine 3.21 which results in errors when trying to run containers using service bundles with any repos after 3.20 \r\n\r\n```\r\nError: crun: executable file `/bin/s6-svscan` not found: No such file or",
+ "write_access": -1
+ },
+ { "timestamp": "2025-07-13T21:13:49Z", "kind": "head_ref_force_pushed", "actor": "lxea" },
+ {
+ "timestamp": "2025-07-13T21:15:21Z",
+ "kind": "commit",
+ "actor": "lxea",
+ "body": "Use /usr/bin/s6-svscan if it exists"
+ },
+ { "timestamp": "2025-07-13T21:15:26Z", "kind": "head_ref_force_pushed", "actor": "lxea" },
+ {
+ "timestamp": "2025-07-18T00:43:12Z",
+ "kind": "review",
+ "actor": "tstromberg",
+ "outcome": "approved",
+ "write_access": -1
+ },
+ {
+ "timestamp": "2025-10-19T22:08:13Z",
+ "kind": "commit",
+ "actor": "lxea",
+ "body": "Merge branch 'chainguard-dev:main' into s6-svscan-edge"
+ },
+ {
+ "timestamp": "2025-10-19T22:08:16Z",
+ "kind": "check_run",
+ "actor": "github",
+ "outcome": "success",
+ "body": "Enforce - Commit Signing",
+ "description": "Successfully verified commit signature.: | | CLAIM | DESCRIPTION |\n|----|-------------------------|-------------|\n| ✅ | Found Git signature | |\n| ✅ | Validated Git signature | |\n| ❌ | Validated Rekor entry | |\n| ✅ | Allowed by policy | |\n",
+ "bot": true
+ },
+ {
+ "timestamp": "2025-10-19T22:08:20Z",
+ "kind": "check_run",
+ "actor": "github",
+ "outcome": "success",
+ "body": "StepSecurity Required Checks",
+ "description": "StepSecurity Required Checks: Finished StepSecurity Required Checks\n- **Script Injection Check** - Checks for script injection vulnerabilities in the PR\n- **NPM Compromised Packages Check** - Checks for compromised npm package versions in the PR\n- **NPM Package Cooldown Check** - Fails if any package version in the PR was released within the configured cooldown period, helping to avoid brand-new (and potentially unreviewed or malicious) releases\n",
+ "bot": true
+ },
+ {
+ "timestamp": "2025-10-19T22:08:23Z",
+ "kind": "check_run",
+ "actor": "github",
+ "outcome": "success",
+ "body": "StepSecurity Optional Checks",
+ "description": "StepSecurity Optional Checks: Finished StepSecurity Optional Checks\n- **Pwn Request Vulnerabilities Check** - Checks for Pwn Request vulnerabilities in the PR via risky triggers\n",
+ "bot": true
+ },
+ {
+ "timestamp": "2025-10-19T22:08:37Z",
+ "kind": "check_run",
+ "actor": "github",
+ "outcome": "skipped",
+ "body": "StepSecurity Harden-Runner",
+ "description": "No Workflow Runs Found: No new Harden-Runner detections for this pull request.",
+ "bot": true
+ },
+ {
+ "timestamp": "2025-10-20T06:25:55Z",
+ "kind": "review",
+ "actor": "tstromberg",
+ "outcome": "approved",
+ "body": "Still LGTM, but needs an approver with write access :(",
+ "write_access": -1
+ }
+ ],
+ "pull_request": {
+ "created_at": "2025-07-13T21:07:57Z",
+ "updated_at": "2025-10-20T06:25:55Z",
+ "approval_summary": {
+ "approvals_with_write_access": 0,
+ "approvals_with_unknown_access": 1,
+ "approvals_without_write_access": 0,
+ "changes_requested": 0
+ },
+ "check_summary": {
+ "success": {
+ "Enforce - Commit Signing": "Successfully verified commit signature.: | | CLAIM | DESCRIPTION |\n|----|-------------------------|-------------|\n| ✅ | Found Git signature | |\n| ✅ | Validated Git signature | |\n| ❌ | Validated Rekor entry | |\n| ✅ | Allowed by policy | |\n",
+ "StepSecurity Optional Checks": "StepSecurity Optional Checks: Finished StepSecurity Optional Checks\n- **Pwn Request Vulnerabilities Check** - Checks for Pwn Request vulnerabilities in the PR via risky triggers\n",
+ "StepSecurity Required Checks": "StepSecurity Required Checks: Finished StepSecurity Required Checks\n- **Script Injection Check** - Checks for script injection vulnerabilities in the PR\n- **NPM Compromised Packages Check** - Checks for compromised npm package versions in the PR\n- **NPM Package Cooldown Check** - Fails if any package version in the PR was released within the configured cooldown period, helping to avoid brand-new (and potentially unreviewed or malicious) releases\n"
+ },
+ "failing": {},
+ "pending": {
+ "CodeQL": "Expected — Waiting for status to be reported",
+ "analyze": "Expected — Waiting for status to be reported",
+ "build": "Expected — Waiting for status to be reported",
+ "lint": "Expected — Waiting for status to be reported",
+ "test": "Expected — Waiting for status to be reported"
+ },
+ "cancelled": {},
+ "skipped": {
+ "StepSecurity Harden-Runner": "No Workflow Runs Found: No new Harden-Runner detections for this pull request."
+ },
+ "stale": {},
+ "neutral": {}
+ },
+ "mergeable": false,
+ "assignees": [],
+ "reviewers": { "tstromberg": "approved" },
+ "participant_access": { "lxea": 0, "tstromberg": 0 },
+ "mergeable_state": "blocked",
+ "mergeable_state_description": "PR requires approval and has pending status checks",
+ "author": "lxea",
+ "body": "/bin/s6-svscan has been moved to `/usr/bin/s6-svscan` since Alpine 3.21 which results in errors when trying to run containers using service bundles with any repos after 3.20 \r\n\r\n```\r\nError: crun: executable file `/bin/s6-svscan` not found: No such file or",
+ "title": "Use /usr/bin/s6-svscan if it exists",
+ "state": "open",
+ "test_state": "passing",
+ "head_sha": "fa40fc009a9b4af0a4048b1335858ecb0d0f1019",
+ "number": 1753,
+ "changed_files": 1,
+ "deletions": 0,
+ "additions": 3,
+ "author_write_access": -1,
+ "author_bot": false,
+ "merged": false,
+ "draft": false
+ }
+}
diff --git a/testdata/pr_1891.json b/testdata/pr_1891.json
index eecfb4b..003261a 100644
--- a/testdata/pr_1891.json
+++ b/testdata/pr_1891.json
@@ -1 +1,251 @@
-{"events":[{"timestamp":"2025-10-15T18:29:51Z","kind":"commit","actor":"markusthoemmes","body":"Only cut releases if we've seen material changes\n\nThis adjusts the weekly release process to only cut a release if something material has changed. This is to avoid version bloat on CI-only changes or README touchups."},{"timestamp":"2025-10-15T18:32:58Z","kind":"pr_opened","actor":"markusthoemmes","body":"This adjusts the weekly release process to only cut a release if something material has changed. This is to avoid version bloat on CI-only changes or README touchups.","write_access":-1},{"timestamp":"2025-10-15T18:33:09Z","kind":"check_run","actor":"github","outcome":"success","body":"StepSecurity Optional Checks","description":"StepSecurity Optional Checks: Finished StepSecurity Optional Checks\n- **Pwn Request Vulnerabilities Check** - Checks for Pwn Request vulnerabilities in the PR via risky triggers\n","bot":true},{"timestamp":"2025-10-15T18:33:09Z","kind":"check_run","actor":"github","outcome":"success","body":"StepSecurity Required Checks","description":"StepSecurity Required Checks: Finished StepSecurity Required Checks\n- **Script Injection Check** - Checks for script injection vulnerabilities in the PR\n- **NPM Compromised Packages Check** - Checks for compromised npm package versions in the PR\n- **NPM Package Cooldown Check** - Fails if any package version in the PR was released within the configured cooldown period, helping to avoid brand-new (and potentially unreviewed or malicious) releases\n","bot":true},{"timestamp":"2025-10-15T18:33:43Z","kind":"check_run","actor":"github","outcome":"success","body":"lint","bot":true,"required":true},{"timestamp":"2025-10-15T18:33:45Z","kind":"check_run","actor":"github","outcome":"success","body":"annotations","bot":true},{"timestamp":"2025-10-15T18:33:59Z","kind":"check_run","actor":"github","outcome":"success","body":"build-date-epoch","bot":true},{"timestamp":"2025-10-15T18:34:03Z","kind":"check_run","actor":"github","outcome":"success","body":"source-date-epoch","bot":true},{"timestamp":"2025-10-15T18:34:17Z","kind":"check_run","actor":"github","outcome":"success","body":"build-nginx-all-arches (x86_64)","bot":true},{"timestamp":"2025-10-15T18:34:22Z","kind":"check_run","actor":"github","outcome":"success","body":"build-nginx-all-arches (aarch64)","bot":true},{"timestamp":"2025-10-15T18:34:22Z","kind":"check_run","actor":"github","outcome":"success","body":"Test on_top_of_base example (x86_64)","bot":true},{"timestamp":"2025-10-15T18:34:29Z","kind":"check_run","actor":"github","outcome":"success","body":"CodeQL","description":"No new alerts in code changed by this pull request: [View all branch alerts](/chainguard-dev/apko/security/code-scanning?query=pr%3A1891+tool%3ACodeQL+is%3Aopen).","bot":true,"required":true},{"timestamp":"2025-10-15T18:34:29Z","kind":"check_run","actor":"github","outcome":"success","body":"Test on_top_of_base example (aarch64)","bot":true},{"timestamp":"2025-10-15T18:34:33Z","kind":"check_run","actor":"github","outcome":"success","body":"build-all-examples-amd64 (ubuntu-latest)","bot":true},{"timestamp":"2025-10-15T18:34:36Z","kind":"check_run","actor":"github","outcome":"success","body":"test","bot":true,"required":true},{"timestamp":"2025-10-15T18:34:37Z","kind":"check_run","actor":"github","outcome":"success","body":"Analyze 
(go)","bot":true},{"timestamp":"2025-10-15T18:34:49Z","kind":"check_run","actor":"github","outcome":"success","body":"analyze","bot":true,"required":true},{"timestamp":"2025-10-15T18:35:43Z","kind":"check_run","actor":"github","outcome":"success","body":"build-all-examples-amd64 (macos-latest)","bot":true},{"timestamp":"2025-10-15T18:36:10Z","kind":"check_run","actor":"github","outcome":"success","body":"build","bot":true,"required":true},{"timestamp":"2025-10-15T18:36:18Z","kind":"check_run","actor":"github","outcome":"success","body":"StepSecurity Harden-Runner","description":" No anomalous activity on CI/CD runners\n\n: No new Harden-Runner detections for this pull request.\n","bot":true},{"timestamp":"2025-10-16T08:23:14Z","kind":"review","actor":"xnox","outcome":"approved","write_access":2},{"timestamp":"2025-10-16T08:23:24Z","kind":"merged","actor":"xnox"},{"timestamp":"2025-10-16T08:23:25Z","kind":"closed","actor":"xnox"},{"timestamp":"2025-10-16T08:23:25Z","kind":"pr_merged","actor":"xnox"},{"timestamp":"2025-10-16T08:23:27Z","kind":"check_run","actor":"github","outcome":"success","body":"Enforce - Commit Signing","description":"Successfully verified commit signature.: | | CLAIM | DESCRIPTION |\n|----|-------------------------|-------------|\n| ✅ | Found Git signature | |\n| ✅ | Validated Git signature | |\n| ✅ | Validated Rekor entry | |\n| ✅ | Allowed by policy | |\n","bot":true}],"pull_request":{"created_at":"2025-10-15T18:32:58Z","updated_at":"2025-10-16T08:23:25Z","closed_at":"2025-10-16T08:23:25Z","merged_at":"2025-10-16T08:23:24Z","approval_summary":{"approvals_with_write_access":1,"approvals_with_unknown_access":0,"approvals_without_write_access":0,"changes_requested":0},"check_summary":{"success":{"Analyze (go)":"success","CodeQL":"No new alerts in code changed by this pull request: [View all branch alerts](/chainguard-dev/apko/security/code-scanning?query=pr%3A1891+tool%3ACodeQL+is%3Aopen).","Enforce - Commit Signing":"Successfully verified commit signature.: | | CLAIM | DESCRIPTION |\n|----|-------------------------|-------------|\n| ✅ | Found Git signature | |\n| ✅ | Validated Git signature | |\n| ✅ | Validated Rekor entry | |\n| ✅ | Allowed by policy | |\n","StepSecurity Harden-Runner":" No anomalous activity on CI/CD runners\n\n: No new Harden-Runner detections for this pull request.\n","StepSecurity Optional Checks":"StepSecurity Optional Checks: Finished StepSecurity Optional Checks\n- **Pwn Request Vulnerabilities Check** - Checks for Pwn Request vulnerabilities in the PR via risky triggers\n","StepSecurity Required Checks":"StepSecurity Required Checks: Finished StepSecurity Required Checks\n- **Script Injection Check** - Checks for script injection vulnerabilities in the PR\n- **NPM Compromised Packages Check** - Checks for compromised npm package versions in the PR\n- **NPM Package Cooldown Check** - Fails if any package version in the PR was released within the configured cooldown period, helping to avoid brand-new (and potentially unreviewed or malicious) releases\n","Test on_top_of_base example (aarch64)":"success","Test on_top_of_base example (x86_64)":"success","analyze":"success","annotations":"success","build":"success","build-all-examples-amd64 (macos-latest)":"success","build-all-examples-amd64 (ubuntu-latest)":"success","build-date-epoch":"success","build-nginx-all-arches (aarch64)":"success","build-nginx-all-arches 
(x86_64)":"success","lint":"success","source-date-epoch":"success","test":"success"},"failing":{},"pending":{},"cancelled":{},"skipped":{},"stale":{},"neutral":{}},"mergeable":null,"assignees":[],"reviewers":{"xnox":"approved"},"participant_access":{"markusthoemmes":0,"xnox":2},"mergeable_state":"unknown","mergeable_state_description":"Merge status is being calculated","author":"markusthoemmes","body":"This adjusts the weekly release process to only cut a release if something material has changed. This is to avoid version bloat on CI-only changes or README touchups.","title":"Only cut releases if we've seen material changes","merged_by":"xnox","state":"merged","test_state":"passing","head_sha":"96dbbaabc39f99b2ecde409f193d0a06112e7c51","number":1891,"changed_files":1,"deletions":2,"additions":26,"author_write_access":-1,"author_bot":false,"merged":true,"draft":false}}
+{
+ "events": [
+ {
+ "timestamp": "2025-10-15T18:29:51Z",
+ "kind": "commit",
+ "actor": "markusthoemmes",
+ "body": "Only cut releases if we've seen material changes\n\nThis adjusts the weekly release process to only cut a release if something material has changed. This is to avoid version bloat on CI-only changes or README touchups."
+ },
+ {
+ "timestamp": "2025-10-15T18:32:58Z",
+ "kind": "pr_opened",
+ "actor": "markusthoemmes",
+ "body": "This adjusts the weekly release process to only cut a release if something material has changed. This is to avoid version bloat on CI-only changes or README touchups.",
+ "write_access": -1
+ },
+ {
+ "timestamp": "2025-10-15T18:33:09Z",
+ "kind": "check_run",
+ "actor": "github",
+ "outcome": "success",
+ "body": "StepSecurity Optional Checks",
+ "description": "StepSecurity Optional Checks: Finished StepSecurity Optional Checks\n- **Pwn Request Vulnerabilities Check** - Checks for Pwn Request vulnerabilities in the PR via risky triggers\n",
+ "bot": true
+ },
+ {
+ "timestamp": "2025-10-15T18:33:09Z",
+ "kind": "check_run",
+ "actor": "github",
+ "outcome": "success",
+ "body": "StepSecurity Required Checks",
+ "description": "StepSecurity Required Checks: Finished StepSecurity Required Checks\n- **Script Injection Check** - Checks for script injection vulnerabilities in the PR\n- **NPM Compromised Packages Check** - Checks for compromised npm package versions in the PR\n- **NPM Package Cooldown Check** - Fails if any package version in the PR was released within the configured cooldown period, helping to avoid brand-new (and potentially unreviewed or malicious) releases\n",
+ "bot": true
+ },
+ {
+ "timestamp": "2025-10-15T18:33:43Z",
+ "kind": "check_run",
+ "actor": "github",
+ "outcome": "success",
+ "body": "lint",
+ "bot": true,
+ "required": true
+ },
+ {
+ "timestamp": "2025-10-15T18:33:45Z",
+ "kind": "check_run",
+ "actor": "github",
+ "outcome": "success",
+ "body": "annotations",
+ "bot": true
+ },
+ {
+ "timestamp": "2025-10-15T18:33:59Z",
+ "kind": "check_run",
+ "actor": "github",
+ "outcome": "success",
+ "body": "build-date-epoch",
+ "bot": true
+ },
+ {
+ "timestamp": "2025-10-15T18:34:03Z",
+ "kind": "check_run",
+ "actor": "github",
+ "outcome": "success",
+ "body": "source-date-epoch",
+ "bot": true
+ },
+ {
+ "timestamp": "2025-10-15T18:34:17Z",
+ "kind": "check_run",
+ "actor": "github",
+ "outcome": "success",
+ "body": "build-nginx-all-arches (x86_64)",
+ "bot": true
+ },
+ {
+ "timestamp": "2025-10-15T18:34:22Z",
+ "kind": "check_run",
+ "actor": "github",
+ "outcome": "success",
+ "body": "build-nginx-all-arches (aarch64)",
+ "bot": true
+ },
+ {
+ "timestamp": "2025-10-15T18:34:22Z",
+ "kind": "check_run",
+ "actor": "github",
+ "outcome": "success",
+ "body": "Test on_top_of_base example (x86_64)",
+ "bot": true
+ },
+ {
+ "timestamp": "2025-10-15T18:34:29Z",
+ "kind": "check_run",
+ "actor": "github",
+ "outcome": "success",
+ "body": "CodeQL",
+ "description": "No new alerts in code changed by this pull request: [View all branch alerts](/chainguard-dev/apko/security/code-scanning?query=pr%3A1891+tool%3ACodeQL+is%3Aopen).",
+ "bot": true,
+ "required": true
+ },
+ {
+ "timestamp": "2025-10-15T18:34:29Z",
+ "kind": "check_run",
+ "actor": "github",
+ "outcome": "success",
+ "body": "Test on_top_of_base example (aarch64)",
+ "bot": true
+ },
+ {
+ "timestamp": "2025-10-15T18:34:33Z",
+ "kind": "check_run",
+ "actor": "github",
+ "outcome": "success",
+ "body": "build-all-examples-amd64 (ubuntu-latest)",
+ "bot": true
+ },
+ {
+ "timestamp": "2025-10-15T18:34:36Z",
+ "kind": "check_run",
+ "actor": "github",
+ "outcome": "success",
+ "body": "test",
+ "bot": true,
+ "required": true
+ },
+ {
+ "timestamp": "2025-10-15T18:34:37Z",
+ "kind": "check_run",
+ "actor": "github",
+ "outcome": "success",
+ "body": "Analyze (go)",
+ "bot": true
+ },
+ {
+ "timestamp": "2025-10-15T18:34:49Z",
+ "kind": "check_run",
+ "actor": "github",
+ "outcome": "success",
+ "body": "analyze",
+ "bot": true,
+ "required": true
+ },
+ {
+ "timestamp": "2025-10-15T18:35:43Z",
+ "kind": "check_run",
+ "actor": "github",
+ "outcome": "success",
+ "body": "build-all-examples-amd64 (macos-latest)",
+ "bot": true
+ },
+ {
+ "timestamp": "2025-10-15T18:36:10Z",
+ "kind": "check_run",
+ "actor": "github",
+ "outcome": "success",
+ "body": "build",
+ "bot": true,
+ "required": true
+ },
+ {
+ "timestamp": "2025-10-15T18:36:18Z",
+ "kind": "check_run",
+ "actor": "github",
+ "outcome": "success",
+ "body": "StepSecurity Harden-Runner",
+ "description": " No anomalous activity on CI/CD runners\n\n: No new Harden-Runner detections for this pull request.\n",
+ "bot": true
+ },
+ {
+ "timestamp": "2025-10-16T08:23:14Z",
+ "kind": "review",
+ "actor": "xnox",
+ "outcome": "approved",
+ "write_access": 2
+ },
+ { "timestamp": "2025-10-16T08:23:24Z", "kind": "merged", "actor": "xnox" },
+ { "timestamp": "2025-10-16T08:23:25Z", "kind": "closed", "actor": "xnox" },
+ { "timestamp": "2025-10-16T08:23:25Z", "kind": "pr_merged", "actor": "xnox" },
+ {
+ "timestamp": "2025-10-16T08:23:27Z",
+ "kind": "check_run",
+ "actor": "github",
+ "outcome": "success",
+ "body": "Enforce - Commit Signing",
+ "description": "Successfully verified commit signature.: | | CLAIM | DESCRIPTION |\n|----|-------------------------|-------------|\n| ✅ | Found Git signature | |\n| ✅ | Validated Git signature | |\n| ✅ | Validated Rekor entry | |\n| ✅ | Allowed by policy | |\n",
+ "bot": true
+ }
+ ],
+ "pull_request": {
+ "created_at": "2025-10-15T18:32:58Z",
+ "updated_at": "2025-10-16T08:23:25Z",
+ "closed_at": "2025-10-16T08:23:25Z",
+ "merged_at": "2025-10-16T08:23:24Z",
+ "approval_summary": {
+ "approvals_with_write_access": 1,
+ "approvals_with_unknown_access": 0,
+ "approvals_without_write_access": 0,
+ "changes_requested": 0
+ },
+ "check_summary": {
+ "success": {
+ "Analyze (go)": "success",
+ "CodeQL": "No new alerts in code changed by this pull request: [View all branch alerts](/chainguard-dev/apko/security/code-scanning?query=pr%3A1891+tool%3ACodeQL+is%3Aopen).",
+ "Enforce - Commit Signing": "Successfully verified commit signature.: | | CLAIM | DESCRIPTION |\n|----|-------------------------|-------------|\n| ✅ | Found Git signature | |\n| ✅ | Validated Git signature | |\n| ✅ | Validated Rekor entry | |\n| ✅ | Allowed by policy | |\n",
+ "StepSecurity Harden-Runner": " No anomalous activity on CI/CD runners\n\n: No new Harden-Runner detections for this pull request.\n",
+ "StepSecurity Optional Checks": "StepSecurity Optional Checks: Finished StepSecurity Optional Checks\n- **Pwn Request Vulnerabilities Check** - Checks for Pwn Request vulnerabilities in the PR via risky triggers\n",
+ "StepSecurity Required Checks": "StepSecurity Required Checks: Finished StepSecurity Required Checks\n- **Script Injection Check** - Checks for script injection vulnerabilities in the PR\n- **NPM Compromised Packages Check** - Checks for compromised npm package versions in the PR\n- **NPM Package Cooldown Check** - Fails if any package version in the PR was released within the configured cooldown period, helping to avoid brand-new (and potentially unreviewed or malicious) releases\n",
+ "Test on_top_of_base example (aarch64)": "success",
+ "Test on_top_of_base example (x86_64)": "success",
+ "analyze": "success",
+ "annotations": "success",
+ "build": "success",
+ "build-all-examples-amd64 (macos-latest)": "success",
+ "build-all-examples-amd64 (ubuntu-latest)": "success",
+ "build-date-epoch": "success",
+ "build-nginx-all-arches (aarch64)": "success",
+ "build-nginx-all-arches (x86_64)": "success",
+ "lint": "success",
+ "source-date-epoch": "success",
+ "test": "success"
+ },
+ "failing": {},
+ "pending": {},
+ "cancelled": {},
+ "skipped": {},
+ "stale": {},
+ "neutral": {}
+ },
+ "mergeable": null,
+ "assignees": [],
+ "reviewers": { "xnox": "approved" },
+ "participant_access": { "markusthoemmes": 0, "xnox": 2 },
+ "mergeable_state": "unknown",
+ "mergeable_state_description": "Merge status is being calculated",
+ "author": "markusthoemmes",
+ "body": "This adjusts the weekly release process to only cut a release if something material has changed. This is to avoid version bloat on CI-only changes or README touchups.",
+ "title": "Only cut releases if we've seen material changes",
+ "merged_by": "xnox",
+ "state": "merged",
+ "test_state": "passing",
+ "head_sha": "96dbbaabc39f99b2ecde409f193d0a06112e7c51",
+ "number": 1891,
+ "changed_files": 1,
+ "deletions": 2,
+ "additions": 26,
+ "author_write_access": -1,
+ "author_bot": false,
+ "merged": true,
+ "draft": false
+ }
+}
diff --git a/testdata/sprinkler_pr_37.json b/testdata/sprinkler_pr_37.json
index 073957e..867bf36 100644
--- a/testdata/sprinkler_pr_37.json
+++ b/testdata/sprinkler_pr_37.json
@@ -1,21 +1,69 @@
-2025/10/28 22:09:24 INFO cache miss: GraphQL pull request not in cache owner=codeGROOVE-dev repo=sprinkler pr=37
-2025/10/28 22:09:24 INFO fetching pull request via GraphQL owner=codeGROOVE-dev repo=sprinkler pr=37
-2025/10/28 22:09:24 INFO HTTP request starting method=POST url=https://api.github.com/graphql host=api.github.com
-2025/10/28 22:09:25 INFO HTTP response received status=200 url=https://api.github.com/graphql elapsed=1.018633041s
-2025/10/28 22:09:25 INFO GraphQL query completed cost=1 remaining=4971 limit=5000
-2025/10/28 22:09:25 INFO GitHub API request starting method=GET url="https://api.github.com/repos/codeGROOVE-dev/sprinkler/collaborators?affiliation=all&per_page=100" headers="map[Accept:application/vnd.github.v3+json Authorization:Bearer gho_...UIqb User-Agent:]"
-2025/10/28 22:09:25 INFO HTTP request starting method=GET url="https://api.github.com/repos/codeGROOVE-dev/sprinkler/collaborators?affiliation=all&per_page=100" host=api.github.com
-2025/10/28 22:09:25 INFO HTTP response received status=200 url="https://api.github.com/repos/codeGROOVE-dev/sprinkler/collaborators?affiliation=all&per_page=100" elapsed=170.363583ms
-2025/10/28 22:09:25 INFO GitHub API response received status="200 OK" url="https://api.github.com/repos/codeGROOVE-dev/sprinkler/collaborators?affiliation=all&per_page=100" elapsed=170.53075ms rate_limits="map[Retry-After: X-RateLimit-Limit:5000 X-RateLimit-Remaining:4997 X-RateLimit-Reset:1761706998 X-RateLimit-Resource:collaborators X-RateLimit-Used:3]"
-2025/10/28 22:09:25 INFO GitHub API request starting method=GET url=https://api.github.com/repos/codeGROOVE-dev/sprinkler/rulesets headers="map[Accept:application/vnd.github.v3+json Authorization:Bearer gho_...UIqb User-Agent:]"
-2025/10/28 22:09:25 INFO HTTP request starting method=GET url=https://api.github.com/repos/codeGROOVE-dev/sprinkler/rulesets host=api.github.com
-2025/10/28 22:09:25 INFO HTTP response received status=200 url=https://api.github.com/repos/codeGROOVE-dev/sprinkler/rulesets elapsed=208.483375ms
-2025/10/28 22:09:25 INFO GitHub API response received status="200 OK" url=https://api.github.com/repos/codeGROOVE-dev/sprinkler/rulesets elapsed=208.550458ms rate_limits="map[Retry-After: X-RateLimit-Limit:5000 X-RateLimit-Remaining:4975 X-RateLimit-Reset:1761706718 X-RateLimit-Resource:core X-RateLimit-Used:25]"
-2025/10/28 22:09:25 INFO fetched required checks from rulesets count=0 checks=[]
-2025/10/28 22:09:25 INFO GitHub API request starting method=GET url="https://api.github.com/repos/codeGROOVE-dev/sprinkler/commits/03baab46ffa62f2d360eaaae7402bebe103639d8/check-runs?per_page=100" headers="map[Accept:application/vnd.github.v3+json Authorization:Bearer gho_...UIqb User-Agent:]"
-2025/10/28 22:09:25 INFO HTTP request starting method=GET url="https://api.github.com/repos/codeGROOVE-dev/sprinkler/commits/03baab46ffa62f2d360eaaae7402bebe103639d8/check-runs?per_page=100" host=api.github.com
-2025/10/28 22:09:26 INFO HTTP response received status=200 url="https://api.github.com/repos/codeGROOVE-dev/sprinkler/commits/03baab46ffa62f2d360eaaae7402bebe103639d8/check-runs?per_page=100" elapsed=197.456917ms
-2025/10/28 22:09:26 INFO GitHub API response received status="200 OK" url="https://api.github.com/repos/codeGROOVE-dev/sprinkler/commits/03baab46ffa62f2d360eaaae7402bebe103639d8/check-runs?per_page=100" elapsed=197.724458ms rate_limits="map[Retry-After: X-RateLimit-Limit:5000 X-RateLimit-Remaining:4974 X-RateLimit-Reset:1761706718 X-RateLimit-Resource:core X-RateLimit-Used:26]"
-2025/10/28 22:09:26 INFO fetched check runs via REST count=1
-2025/10/28 22:09:26 INFO successfully fetched pull request via hybrid GraphQL+REST owner=codeGROOVE-dev repo=sprinkler pr=37 event_count=6 api_calls_made="3 (vs 13+ with REST)"
-{"events":[{"timestamp":"2025-10-29T02:04:26Z","kind":"commit","actor":"Thomas Stromberg","body":"Add TestCheckEventRaceCondition"},{"timestamp":"2025-10-29T02:04:47Z","kind":"pr_opened","actor":"tstromberg","write_access":2},{"timestamp":"2025-10-29T02:05:04Z","kind":"merged","actor":"tstromberg"},{"timestamp":"2025-10-29T02:05:04Z","kind":"closed","actor":"tstromberg"},{"timestamp":"2025-10-29T02:05:04Z","kind":"pr_merged","actor":"tstromberg"},{"timestamp":"2025-10-29T02:05:10Z","kind":"check_run","actor":"github","outcome":"success","body":"Kusari Inspector","description":"Security Analysis Passed: No security issues found","bot":true}],"pull_request":{"created_at":"2025-10-29T02:04:47Z","updated_at":"2025-10-29T02:05:04Z","closed_at":"2025-10-29T02:05:04Z","merged_at":"2025-10-29T02:05:04Z","approval_summary":{"approvals_with_write_access":0,"approvals_with_unknown_access":0,"approvals_without_write_access":0,"changes_requested":0},"check_summary":{"success":{"Kusari Inspector":"Security Analysis Passed: No security issues found"},"failing":{},"pending":{},"cancelled":{},"skipped":{},"stale":{},"neutral":{}},"mergeable":null,"assignees":[],"participant_access":{"Thomas Stromberg":0,"tstromberg":2},"mergeable_state":"unknown","mergeable_state_description":"Merge status is being calculated","author":"tstromberg","body":"","title":"Add TestCheckEventRaceCondition","merged_by":"tstromberg","state":"merged","test_state":"passing","head_sha":"03baab46ffa62f2d360eaaae7402bebe103639d8","number":37,"changed_files":1,"deletions":0,"additions":324,"author_write_access":2,"author_bot":false,"merged":true,"draft":false}}
+{
+ "events": [
+ {
+ "timestamp": "2025-10-29T02:04:26Z",
+ "kind": "commit",
+ "actor": "Thomas Stromberg",
+ "body": "Add TestCheckEventRaceCondition"
+ },
+ {
+ "timestamp": "2025-10-29T02:04:47Z",
+ "kind": "pr_opened",
+ "actor": "tstromberg",
+ "write_access": 2
+ },
+ { "timestamp": "2025-10-29T02:05:04Z", "kind": "merged", "actor": "tstromberg" },
+ { "timestamp": "2025-10-29T02:05:04Z", "kind": "closed", "actor": "tstromberg" },
+ { "timestamp": "2025-10-29T02:05:04Z", "kind": "pr_merged", "actor": "tstromberg" },
+ {
+ "timestamp": "2025-10-29T02:05:10Z",
+ "kind": "check_run",
+ "actor": "github",
+ "outcome": "success",
+ "body": "Kusari Inspector",
+ "description": "Security Analysis Passed: No security issues found",
+ "bot": true
+ }
+ ],
+ "pull_request": {
+ "created_at": "2025-10-29T02:04:47Z",
+ "updated_at": "2025-10-29T02:05:04Z",
+ "closed_at": "2025-10-29T02:05:04Z",
+ "merged_at": "2025-10-29T02:05:04Z",
+ "approval_summary": {
+ "approvals_with_write_access": 0,
+ "approvals_with_unknown_access": 0,
+ "approvals_without_write_access": 0,
+ "changes_requested": 0
+ },
+ "check_summary": {
+ "success": { "Kusari Inspector": "Security Analysis Passed: No security issues found" },
+ "failing": {},
+ "pending": {},
+ "cancelled": {},
+ "skipped": {},
+ "stale": {},
+ "neutral": {}
+ },
+ "mergeable": null,
+ "assignees": [],
+ "participant_access": { "Thomas Stromberg": 0, "tstromberg": 2 },
+ "mergeable_state": "unknown",
+ "mergeable_state_description": "Merge status is being calculated",
+ "author": "tstromberg",
+ "body": "",
+ "title": "Add TestCheckEventRaceCondition",
+ "merged_by": "tstromberg",
+ "state": "merged",
+ "test_state": "passing",
+ "head_sha": "03baab46ffa62f2d360eaaae7402bebe103639d8",
+ "number": 37,
+ "changed_files": 1,
+ "deletions": 0,
+ "additions": 324,
+ "author_write_access": 2,
+ "author_bot": false,
+ "merged": true,
+ "draft": false
+ }
+}