diff --git a/cmd/prcost/main.go b/cmd/prcost/main.go index 553e6a9..86d0068 100644 --- a/cmd/prcost/main.go +++ b/cmd/prcost/main.go @@ -481,72 +481,6 @@ func formatLOC(kloc float64) string { return fmt.Sprintf("%.0fk LOC", kloc) } -// efficiencyGrade returns a letter grade and message based on efficiency percentage (MIT scale). -func efficiencyGrade(efficiencyPct float64) (grade, message string) { - switch { - case efficiencyPct >= 97: - return "A+", "Impeccable" - case efficiencyPct >= 93: - return "A", "Excellent" - case efficiencyPct >= 90: - return "A-", "Nearly excellent" - case efficiencyPct >= 87: - return "B+", "Acceptable+" - case efficiencyPct >= 83: - return "B", "Acceptable" - case efficiencyPct >= 80: - return "B-", "Nearly acceptable" - case efficiencyPct >= 70: - return "C", "Average" - case efficiencyPct >= 60: - return "D", "Not good my friend." - default: - return "F", "Failing" - } -} - -// mergeVelocityGrade returns a grade based on average PR open time in days. -// A+: 4h, A: 8h, A-: 12h, B+: 18h, B: 24h, B-: 36h, C: 100h, D: 120h, F: 120h+. -func mergeVelocityGrade(avgOpenDays float64) (grade, message string) { - switch { - case avgOpenDays <= 0.1667: // 4 hours - return "A+", "Impeccable" - case avgOpenDays <= 0.3333: // 8 hours - return "A", "Excellent" - case avgOpenDays <= 0.5: // 12 hours - return "A-", "Nearly excellent" - case avgOpenDays <= 0.75: // 18 hours - return "B+", "Acceptable+" - case avgOpenDays <= 1.0: // 24 hours - return "B", "Acceptable" - case avgOpenDays <= 1.5: // 36 hours - return "B-", "Nearly acceptable" - case avgOpenDays <= 4.1667: // 100 hours - return "C", "Average" - case avgOpenDays <= 5.0: // 120 hours - return "D", "Not good my friend." - default: - return "F", "Failing" - } -} - -// mergeRateGrade returns a grade based on merge success rate percentage. -// A: >90%, B: >80%, C: >70%, D: >60%, F: ≤60%. -func mergeRateGrade(mergeRatePct float64) (grade, message string) { - switch { - case mergeRatePct > 90: - return "A", "Excellent" - case mergeRatePct > 80: - return "B", "Good" - case mergeRatePct > 70: - return "C", "Acceptable" - case mergeRatePct > 60: - return "D", "Low" - default: - return "F", "Poor" - } -} - // printMergeTimeModelingCallout prints a callout showing potential savings from reduced merge time. 
func printMergeTimeModelingCallout(breakdown *cost.Breakdown, cfg cost.Config) { targetHours := cfg.TargetMergeTimeHours @@ -649,11 +583,10 @@ func printEfficiency(breakdown *cost.Breakdown) { efficiencyPct = 100.0 } - grade, message := efficiencyGrade(efficiencyPct) + grade, message := cost.EfficiencyGrade(efficiencyPct) - // Calculate merge velocity grade based on PR duration - prDurationDays := breakdown.PRDuration / 24.0 - velocityGrade, velocityMessage := mergeVelocityGrade(prDurationDays) + // Calculate merge velocity grade based on PR duration (in hours) + velocityGrade, velocityMessage := cost.MergeVelocityGrade(breakdown.PRDuration) fmt.Println(" ┌─────────────────────────────────────────────────────────────┐") headerText := fmt.Sprintf("DEVELOPMENT EFFICIENCY: %s (%.1f%%) - %s", grade, efficiencyPct, message) diff --git a/cmd/prcost/repository.go b/cmd/prcost/repository.go index 9cb8fa0..1421e82 100644 --- a/cmd/prcost/repository.go +++ b/cmd/prcost/repository.go @@ -99,17 +99,19 @@ func analyzeRepository(ctx context.Context, owner, repo string, sampleSize, days openPRCount = 0 } - // Convert PRSummary to PRMergeStatus for merge rate calculation - prStatuses := make([]cost.PRMergeStatus, len(prs)) + // Convert PRSummary to PRSummaryInfo for extrapolation + prSummaryInfos := make([]cost.PRSummaryInfo, len(prs)) for i, pr := range prs { - prStatuses[i] = cost.PRMergeStatus{ + prSummaryInfos[i] = cost.PRSummaryInfo{ + Owner: pr.Owner, + Repo: pr.Repo, Merged: pr.Merged, State: pr.State, } } - // Extrapolate costs from samples using library function - extrapolated := cost.ExtrapolateFromSamples(breakdowns, len(prs), totalAuthors, openPRCount, actualDays, cfg, prStatuses) + // Extrapolate costs from samples using library function (pass nil for visibility since single-repo = public) + extrapolated := cost.ExtrapolateFromSamples(breakdowns, len(prs), totalAuthors, openPRCount, actualDays, cfg, prSummaryInfos, nil) // Display results in itemized format printExtrapolatedResults(fmt.Sprintf("%s/%s", owner, repo), actualDays, &extrapolated, cfg) @@ -208,17 +210,19 @@ func analyzeOrganization(ctx context.Context, org string, sampleSize, days int, } slog.Info("Counted total open PRs across organization", "org", org, "open_prs", totalOpenPRs) - // Convert PRSummary to PRMergeStatus for merge rate calculation - prStatuses := make([]cost.PRMergeStatus, len(prs)) + // Convert PRSummary to PRSummaryInfo for extrapolation + prSummaryInfos := make([]cost.PRSummaryInfo, len(prs)) for i, pr := range prs { - prStatuses[i] = cost.PRMergeStatus{ + prSummaryInfos[i] = cost.PRSummaryInfo{ + Owner: pr.Owner, + Repo: pr.Repo, Merged: pr.Merged, State: pr.State, } } - // Extrapolate costs from samples using library function - extrapolated := cost.ExtrapolateFromSamples(breakdowns, len(prs), totalAuthors, totalOpenPRs, actualDays, cfg, prStatuses) + // Extrapolate costs from samples using library function (CLI doesn't fetch visibility, assume public) + extrapolated := cost.ExtrapolateFromSamples(breakdowns, len(prs), totalAuthors, totalOpenPRs, actualDays, cfg, prSummaryInfos, nil) // Display results in itemized format printExtrapolatedResults(fmt.Sprintf("%s (organization)", org), actualDays, &extrapolated, cfg) @@ -637,7 +641,7 @@ func printExtrapolatedEfficiency(ext *cost.ExtrapolatedBreakdown, days int, cfg preventableHours := ext.CodeChurnHours + ext.DeliveryDelayHours + ext.AutomatedUpdatesHours + ext.PRTrackingHours preventableCost := ext.CodeChurnCost + ext.DeliveryDelayCost + ext.AutomatedUpdatesCost 
+ ext.PRTrackingCost - // Calculate efficiency + // Calculate efficiency (for display purposes - grade comes from backend) var efficiencyPct float64 if ext.TotalHours > 0 { efficiencyPct = 100.0 * (ext.TotalHours - preventableHours) / ext.TotalHours @@ -645,11 +649,11 @@ func printExtrapolatedEfficiency(ext *cost.ExtrapolatedBreakdown, days int, cfg efficiencyPct = 100.0 } - grade, message := efficiencyGrade(efficiencyPct) - - // Calculate merge velocity grade based on average PR duration - avgDurationDays := ext.AvgPRDurationHours / 24.0 - velocityGrade, velocityMessage := mergeVelocityGrade(avgDurationDays) + // Use grades computed by backend (single source of truth) + grade := ext.EfficiencyGrade + message := ext.EfficiencyMessage + velocityGrade := ext.MergeVelocityGrade + velocityMessage := ext.MergeVelocityMessage // Calculate annual waste annualMultiplier := 365.0 / float64(days) @@ -674,11 +678,11 @@ func printExtrapolatedEfficiency(ext *cost.ExtrapolatedBreakdown, days int, cfg fmt.Printf(" │ %-60s│\n", velocityHeader) fmt.Println(" └─────────────────────────────────────────────────────────────┘") - // Merge Rate box (if data available) + // Merge Success Rate box (if data available) if ext.MergedPRs+ext.UnmergedPRs > 0 { - mergeRateGradeStr, mergeRateMessage := mergeRateGrade(ext.MergeRate) + // Use grade computed by backend (single source of truth) fmt.Println(" ┌─────────────────────────────────────────────────────────────┐") - mergeRateHeader := fmt.Sprintf("MERGE RATE: %s (%.1f%%) - %s", mergeRateGradeStr, ext.MergeRate, mergeRateMessage) + mergeRateHeader := fmt.Sprintf("MERGE SUCCESS RATE: %s (%.1f%%) - %s", ext.MergeRateGrade, ext.MergeRate, ext.MergeRateGradeMessage) if len(mergeRateHeader) > innerWidth { mergeRateHeader = mergeRateHeader[:innerWidth] } diff --git a/internal/server/integration_test.go b/internal/server/integration_test.go index ffe43d3..87d48be 100644 --- a/internal/server/integration_test.go +++ b/internal/server/integration_test.go @@ -33,7 +33,7 @@ func TestOrgSampleStreamIntegration(t *testing.T) { // Create request reqBody := OrgSampleRequest{ Org: "codeGROOVE-dev", - SampleSize: 50, + SampleSize: 100, Days: 60, } body, err := json.Marshal(reqBody) @@ -195,7 +195,7 @@ func TestOrgSampleStreamNoTimeout(t *testing.T) { // Create request with larger sample size to ensure longer operation reqBody := OrgSampleRequest{ Org: "codeGROOVE-dev", - SampleSize: 50, + SampleSize: 100, Days: 60, } body, err := json.Marshal(reqBody) diff --git a/internal/server/server.go b/internal/server/server.go index c7a7841..5909601 100644 --- a/internal/server/server.go +++ b/internal/server/server.go @@ -144,7 +144,7 @@ type CalculateResponse struct { type RepoSampleRequest struct { Owner string `json:"owner"` Repo string `json:"repo"` - SampleSize int `json:"sample_size,omitempty"` // Default: 50 + SampleSize int `json:"sample_size,omitempty"` // Default: 100 Days int `json:"days,omitempty"` // Default: 60 Config *cost.Config `json:"config,omitempty"` } @@ -154,7 +154,7 @@ type RepoSampleRequest struct { //nolint:govet // fieldalignment: API struct field order optimized for readability type OrgSampleRequest struct { Org string `json:"org"` - SampleSize int `json:"sample_size,omitempty"` // Default: 50 + SampleSize int `json:"sample_size,omitempty"` // Default: 100 Days int `json:"days,omitempty"` // Default: 60 Config *cost.Config `json:"config,omitempty"` } @@ -1478,18 +1478,18 @@ func (s *Server) parseRepoSampleRequest(ctx context.Context, r *http.Request) (* // Set 
defaults if req.SampleSize == 0 { - req.SampleSize = 50 + req.SampleSize = 100 } if req.Days == 0 { req.Days = 60 } - // Validate reasonable limits (silently cap at 50) + // Validate reasonable limits (silently cap at 100) if req.SampleSize < 1 { return nil, errors.New("sample_size must be at least 1") } - if req.SampleSize > 50 { - req.SampleSize = 50 + if req.SampleSize > 100 { + req.SampleSize = 100 } if req.Days < 1 || req.Days > 365 { return nil, errors.New("days must be between 1 and 365") @@ -1536,18 +1536,18 @@ func (s *Server) parseOrgSampleRequest(ctx context.Context, r *http.Request) (*O // Set defaults if req.SampleSize == 0 { - req.SampleSize = 50 + req.SampleSize = 100 } if req.Days == 0 { req.Days = 60 } - // Validate reasonable limits (silently cap at 50) + // Validate reasonable limits (silently cap at 100) if req.SampleSize < 1 { return nil, errors.New("sample_size must be at least 1") } - if req.SampleSize > 50 { - req.SampleSize = 50 + if req.SampleSize > 100 { + req.SampleSize = 100 } if req.Days < 1 || req.Days > 365 { return nil, errors.New("days must be between 1 and 365") @@ -1659,17 +1659,19 @@ func (s *Server) processRepoSample(ctx context.Context, req *RepoSampleRequest, openPRCount = 0 } - // Convert PRSummary to PRMergeStatus for merge rate calculation - prStatuses := make([]cost.PRMergeStatus, len(prs)) + // Convert PRSummary to PRSummaryInfo for extrapolation + prSummaryInfos := make([]cost.PRSummaryInfo, len(prs)) for i, pr := range prs { - prStatuses[i] = cost.PRMergeStatus{ + prSummaryInfos[i] = cost.PRSummaryInfo{ + Owner: pr.Owner, + Repo: pr.Repo, Merged: pr.Merged, State: pr.State, } } // Extrapolate costs from samples - extrapolated := cost.ExtrapolateFromSamples(breakdowns, len(prs), totalAuthors, openPRCount, actualDays, cfg, prStatuses) + extrapolated := cost.ExtrapolateFromSamples(breakdowns, len(prs), totalAuthors, openPRCount, actualDays, cfg, prSummaryInfos, nil) // Only include seconds_in_state if we have data (turnserver only) var secondsInState map[string]int @@ -1721,6 +1723,23 @@ func (s *Server) processOrgSample(ctx context.Context, req *OrgSampleRequest, to return nil, fmt.Errorf("no PRs found in the last %d days", req.Days) } + // Fetch repository visibility for the organization (2x the time period for comprehensive coverage) + reposSince := time.Now().AddDate(0, 0, -req.Days*2) + repoVisibilityData, err := github.FetchOrgRepositoriesWithActivity(ctx, req.Org, reposSince, token) + if err != nil { + s.logger.WarnContext(ctx, "Failed to fetch repository visibility, assuming all public", "error", err) + repoVisibilityData = nil + } + + // Convert RepoVisibility map to bool map (repo name -> isPrivate) + var repoVisibility map[string]bool + if repoVisibilityData != nil { + repoVisibility = make(map[string]bool, len(repoVisibilityData)) + for name, visibility := range repoVisibilityData { + repoVisibility[name] = visibility.IsPrivate + } + } + // Calculate actual time window (may be less than requested if we hit API limit) actualDays, _ = github.CalculateActualTimeWindow(prs, req.Days) @@ -1788,17 +1807,19 @@ func (s *Server) processOrgSample(ctx context.Context, req *OrgSampleRequest, to } s.logger.InfoContext(ctx, "Counted total open PRs across organization", "org", req.Org, "open_prs", totalOpenPRs) - // Convert PRSummary to PRMergeStatus for merge rate calculation - prStatuses := make([]cost.PRMergeStatus, len(prs)) + // Convert PRSummary to PRSummaryInfo for extrapolation + prSummaryInfos := make([]cost.PRSummaryInfo, len(prs)) for i, 
pr := range prs { - prStatuses[i] = cost.PRMergeStatus{ + prSummaryInfos[i] = cost.PRSummaryInfo{ + Owner: pr.Owner, + Repo: pr.Repo, Merged: pr.Merged, State: pr.State, } } // Extrapolate costs from samples - extrapolated := cost.ExtrapolateFromSamples(breakdowns, len(prs), totalAuthors, totalOpenPRs, actualDays, cfg, prStatuses) + extrapolated := cost.ExtrapolateFromSamples(breakdowns, len(prs), totalAuthors, totalOpenPRs, actualDays, cfg, prSummaryInfos, repoVisibility) // Only include seconds_in_state if we have data (turnserver only) var secondsInState map[string]int @@ -2194,17 +2215,19 @@ func (s *Server) processRepoSampleWithProgress(ctx context.Context, req *RepoSam openPRCount = 0 } - // Convert PRSummary to PRMergeStatus for merge rate calculation - prStatuses := make([]cost.PRMergeStatus, len(prs)) + // Convert PRSummary to PRSummaryInfo for extrapolation + prSummaryInfos := make([]cost.PRSummaryInfo, len(prs)) for i, pr := range prs { - prStatuses[i] = cost.PRMergeStatus{ + prSummaryInfos[i] = cost.PRSummaryInfo{ + Owner: pr.Owner, + Repo: pr.Repo, Merged: pr.Merged, State: pr.State, } } // Extrapolate costs from samples - extrapolated := cost.ExtrapolateFromSamples(breakdowns, len(prs), totalAuthors, openPRCount, actualDays, cfg, prStatuses) + extrapolated := cost.ExtrapolateFromSamples(breakdowns, len(prs), totalAuthors, openPRCount, actualDays, cfg, prSummaryInfos, nil) // Only include seconds_in_state if we have data (turnserver only) var secondsInState map[string]int @@ -2353,17 +2376,19 @@ func (s *Server) processOrgSampleWithProgress(ctx context.Context, req *OrgSampl } s.logger.InfoContext(ctx, "Counted total open PRs across organization", "open_prs", totalOpenPRs, "org", req.Org) - // Convert PRSummary to PRMergeStatus for merge rate calculation - prStatuses := make([]cost.PRMergeStatus, len(prs)) + // Convert PRSummary to PRSummaryInfo for extrapolation + prSummaryInfos := make([]cost.PRSummaryInfo, len(prs)) for i, pr := range prs { - prStatuses[i] = cost.PRMergeStatus{ + prSummaryInfos[i] = cost.PRSummaryInfo{ + Owner: pr.Owner, + Repo: pr.Repo, Merged: pr.Merged, State: pr.State, } } // Extrapolate costs from samples - extrapolated := cost.ExtrapolateFromSamples(breakdowns, len(prs), totalAuthors, totalOpenPRs, actualDays, cfg, prStatuses) + extrapolated := cost.ExtrapolateFromSamples(breakdowns, len(prs), totalAuthors, totalOpenPRs, actualDays, cfg, prSummaryInfos, nil) // Only include seconds_in_state if we have data (turnserver only) var secondsInState map[string]int diff --git a/internal/server/server_test.go b/internal/server/server_test.go index 39fe200..0720c14 100644 --- a/internal/server/server_test.go +++ b/internal/server/server_test.go @@ -1484,7 +1484,7 @@ func TestParseRepoSampleRequest(t *testing.T) { wantOwner: "testowner", wantRepo: "testrepo", wantDays: 60, - wantSampleSize: 50, + wantSampleSize: 100, }, { name: "missing owner", @@ -1571,7 +1571,7 @@ func TestParseOrgSampleRequest(t *testing.T) { wantErr: false, wantOrg: "testorg", wantDays: 60, - wantSampleSize: 50, + wantSampleSize: 100, }, { name: "missing org", diff --git a/internal/server/static/index.html b/internal/server/static/index.html index 1b73733..dfdb78b 100644 --- a/internal/server/static/index.html +++ b/internal/server/static/index.html @@ -556,14 +556,14 @@ border: 2px solid #e5e5ea; margin-bottom: 32px; box-shadow: 0 4px 12px rgba(0, 0, 0, 0.06); - display: grid; - grid-template-columns: repeat(auto-fit, minmax(220px, 1fr)); - gap: 12px; + display: flex; + flex-wrap: wrap; + 
gap: 10px; } @media (max-width: 768px) { .efficiency-section { - grid-template-columns: 1fr; + flex-direction: column; } } @@ -641,11 +641,13 @@ .efficiency-box { background: #ffffff; - padding: 14px 16px; + padding: 10px 12px; border-radius: 10px; border: 1.5px solid #e5e5ea; box-shadow: 0 2px 8px rgba(0, 0, 0, 0.04); margin: 0; + flex: 1; + min-width: 165px; } .efficiency-callout { @@ -1061,7 +1063,7 @@

id="repoSampleSize" value="50" min="1" - max="50" + max="100" >
50 (recommended, ±14% accuracy) or 30 (faster, ±18% accuracy)
@@ -1101,7 +1103,7 @@

id="orgSampleSize" value="50" min="1" - max="50" + max="100" >
50 (recommended, ±14% accuracy) or 30 (faster, ±18% accuracy)
@@ -1424,41 +1426,44 @@

} } - function formatEfficiencyHTML(efficiencyPct, grade, message, preventableCost, preventableHours, totalCost, totalHours, avgOpenHours, isAnnual = false, annualWasteCost = 0, annualWasteHours = 0, wasteHoursPerWeek = 0, wasteCostPerWeek = 0, wasteHoursPerAuthorPerWeek = 0, wasteCostPerAuthorPerWeek = 0, totalAuthors = 0, salary = 250000, benefitsMultiplier = 1.2, analysisType = 'project', sourceName = '', mergeRate = 0, mergedPRs = 0, unmergedPRs = 0) { + function formatEfficiencyHTML(efficiencyPct, grade, message, preventableCost, preventableHours, totalCost, totalHours, avgOpenHours, isAnnual = false, annualWasteCost = 0, annualWasteHours = 0, wasteHoursPerWeek = 0, wasteCostPerWeek = 0, wasteHoursPerAuthorPerWeek = 0, wasteCostPerAuthorPerWeek = 0, totalAuthors = 0, salary = 250000, benefitsMultiplier = 1.2, analysisType = 'project', sourceName = '', mergeRate = 0, mergedPRs = 0, unmergedPRs = 0, velocityGrade = '', velocityMessage = '', mergeRateGrade = '', mergeRateMessage = '') { let html = '
'; // Development Efficiency box html += '
'; - html += '

Development Efficiency

'; - html += '
'; - html += `${grade}`; - html += `${efficiencyPct.toFixed(1)}%`; + html += '

Development Efficiency

'; + html += '
'; + html += `${grade}`; + html += `${efficiencyPct.toFixed(1)}%`; html += '
'; - html += `
${message}
`; - html += '
Time spent coding vs. waiting
'; + html += `
${message}
`; html += '
'; // Close efficiency-box - // Merge Velocity box - const velocityGradeObj = mergeVelocityGrade(avgOpenHours); + // Merge Velocity box - use backend-computed grades if provided, fallback to client calculation + const velocityGradeObj = velocityGrade && velocityMessage + ? { grade: velocityGrade, message: velocityMessage } + : mergeVelocityGrade(avgOpenHours); html += '
'; - html += '

Average Merge Velocity

'; - html += '
'; - html += `${velocityGradeObj.grade}`; - html += `${formatTimeUnit(avgOpenHours)}`; + html += '

Merge Velocity

'; + html += '
'; + html += `${velocityGradeObj.grade}`; + html += `${formatTimeUnit(avgOpenHours)}`; html += '
'; - html += `
${velocityGradeObj.message}
`; + html += `
${velocityGradeObj.message}
`; html += '
'; // Close efficiency-box - // Merge Rate box (if data available) + // Merge Success Rate box (if data available) - use backend-computed grades if provided if (mergedPRs + unmergedPRs > 0) { - const mergeRateGradeObj = mergeRateGrade(mergeRate); + const mergeRateGradeObj = mergeRateGrade && mergeRateMessage + ? { grade: mergeRateGrade, message: mergeRateMessage } + : mergeRateGrade(mergeRate); html += '
'; - html += '

Merge Rate

'; - html += '
'; - html += `${mergeRateGradeObj.grade}`; - html += `${mergeRate.toFixed(1)}%`; + html += '

Merge Success

'; + html += '
'; + html += `${mergeRateGradeObj.grade}`; + html += `${mergeRate.toFixed(1)}%`; html += '
'; - html += `
${mergeRateGradeObj.message}
`; + html += `
${mergeRateGradeObj.message}
`; html += '
Recently modified PRs successfully merged
'; html += '
'; // Close efficiency-box } @@ -2366,7 +2371,12 @@

// Build elegant HTML structure let html = '
'; - html += `

${sourceName}

`; + const repoCount = e.unique_repositories || 0; + const publicCount = e.public_repositories || 0; + const privateCount = e.private_repositories || 0; + const repoText = repoCount === 1 ? 'repository' : 'repositories'; + const visibilityText = privateCount > 0 ? `(${publicCount} public, ${privateCount} private)` : 'public'; + html += `

${sourceName} - ${repoCount} ${visibilityText} ${repoText} analyzed

`; html += '
'; html += `Period: Last ${days} days  •  `; html += `Total public PRs: ${e.total_prs} (${e.human_prs} human, ${e.bot_prs} bot)  •  `; @@ -2382,7 +2392,8 @@

const extPreventableHours = e.code_churn_hours + e.delivery_delay_hours + e.automated_updates_hours + e.pr_tracking_hours; const extPreventableCost = e.code_churn_cost + e.delivery_delay_cost + e.automated_updates_cost + e.pr_tracking_cost; const extEfficiencyPct = e.total_hours > 0 ? 100.0 * (e.total_hours - extPreventableHours) / e.total_hours : 100.0; - const extEfficiency = efficiencyGrade(extEfficiencyPct); + // Use grades computed by backend (single source of truth) + const extEfficiency = { grade: e.efficiency_grade, message: e.efficiency_message }; const annualMultiplier = 365.0 / parseInt(days); const annualWasteHours = extPreventableHours * annualMultiplier; const annualWasteCost = extPreventableCost * annualMultiplier; @@ -2401,7 +2412,12 @@

const mergeRate = e.merge_rate || 0; const mergedPRs = e.merged_prs || 0; const unmergedPRs = e.unmerged_prs || 0; - html += formatEfficiencyHTML(extEfficiencyPct, extEfficiency.grade, extEfficiency.message, extPreventableCost, extPreventableHours, e.total_cost, e.total_hours, avgPRDurationHours, true, annualWasteCost, annualWasteHours, wasteHoursPerWeek, wasteCostPerWeek, wasteHoursPerAuthorPerWeek, wasteCostPerAuthorPerWeek, totalAuthors, salary, benefitsMultiplier, analysisType, sourceName, mergeRate, mergedPRs, unmergedPRs); + // Use backend-computed grades (single source of truth) + const velocityGrade = e.merge_velocity_grade || ''; + const velocityMessage = e.merge_velocity_message || ''; + const mergeRateGrade = e.merge_rate_grade || ''; + const mergeRateMessage = e.merge_rate_grade_message || ''; + html += formatEfficiencyHTML(extEfficiencyPct, extEfficiency.grade, extEfficiency.message, extPreventableCost, extPreventableHours, e.total_cost, e.total_hours, avgPRDurationHours, true, annualWasteCost, annualWasteHours, wasteHoursPerWeek, wasteCostPerWeek, wasteHoursPerAuthorPerWeek, wasteCostPerAuthorPerWeek, totalAuthors, salary, benefitsMultiplier, analysisType, sourceName, mergeRate, mergedPRs, unmergedPRs, velocityGrade, velocityMessage, mergeRateGrade, mergeRateMessage); // Add R2R callout if enabled, otherwise generic merge time callout // Calculate modeled efficiency (with 1.5h merge time) diff --git a/pkg/cost/analyze.go b/pkg/cost/analyze.go index e0062a4..f4261c5 100644 --- a/pkg/cost/analyze.go +++ b/pkg/cost/analyze.go @@ -32,12 +32,14 @@ type AnalysisRequest struct { Concurrency int // Number of concurrent fetches (0 = sequential) } -// PRSummaryInfo contains basic PR information needed for fetching. +// PRSummaryInfo contains basic PR information needed for fetching and analysis. type PRSummaryInfo struct { UpdatedAt time.Time Owner string Repo string + State string // "OPEN", "CLOSED", "MERGED" Number int + Merged bool // Whether the PR was merged } // AnalysisResult contains the breakdowns from analyzed PRs. diff --git a/pkg/cost/cost.go b/pkg/cost/cost.go index 4426faa..bf6c8a0 100644 --- a/pkg/cost/cost.go +++ b/pkg/cost/cost.go @@ -154,12 +154,12 @@ type PRData struct { CreatedAt time.Time ClosedAt time.Time Author string + State string Events []ParticipantEvent LinesAdded int LinesDeleted int AuthorBot bool - Merged bool // Whether the PR was merged - State string // PR state: "open", "closed" + Merged bool } // AuthorCostDetail breaks down the author's costs. 
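For readers following the signature change above, here is a minimal sketch (reviewer illustration, not part of this diff) of how a caller populates the widened PRSummaryInfo and reads the backend-computed grades. The field names and the ExtrapolateFromSamples signature come from the hunks above; the module import path, the example package, and the summary stand-in type are assumptions.

package example // hypothetical package for illustration only

import (
	"fmt"

	"github.com/codeGROOVE-dev/prcost/pkg/cost" // assumed module path
)

// summary is a stand-in for the github.PRSummary values iterated in
// cmd/prcost/repository.go and internal/server/server.go; only the fields
// used below matter for this sketch.
type summary struct {
	Owner, Repo, State string
	Merged             bool
}

func extrapolate(breakdowns []cost.Breakdown, prs []summary, totalAuthors, openPRs, days int, cfg cost.Config) {
	// Convert PR summaries into the richer PRSummaryInfo expected by the new API.
	infos := make([]cost.PRSummaryInfo, len(prs))
	for i, pr := range prs {
		infos[i] = cost.PRSummaryInfo{
			Owner:  pr.Owner,
			Repo:   pr.Repo,
			State:  pr.State, // "OPEN", "CLOSED", "MERGED"
			Merged: pr.Merged,
		}
	}
	// A nil visibility map means every repository is counted as public.
	ext := cost.ExtrapolateFromSamples(breakdowns, len(prs), totalAuthors, openPRs, days, cfg, infos, nil)
	fmt.Printf("efficiency %s, velocity %s, merge rate %s\n",
		ext.EfficiencyGrade, ext.MergeVelocityGrade, ext.MergeRateGrade)
}
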
diff --git a/pkg/cost/cost_test.go b/pkg/cost/cost_test.go index 501e022..7f7e587 100644 --- a/pkg/cost/cost_test.go +++ b/pkg/cost/cost_test.go @@ -1261,7 +1261,7 @@ func TestAnalyzePRsContextCancellation(t *testing.T) { func TestExtrapolateFromSamplesEmpty(t *testing.T) { cfg := DefaultConfig() - result := ExtrapolateFromSamples([]Breakdown{}, 100, 10, 5, 30, cfg, []PRMergeStatus{}) + result := ExtrapolateFromSamples([]Breakdown{}, 100, 10, 5, 30, cfg, []PRSummaryInfo{}, nil) if result.TotalPRs != 100 { t.Errorf("Expected TotalPRs=100, got %d", result.TotalPRs) @@ -1298,12 +1298,12 @@ func TestExtrapolateFromSamplesSingle(t *testing.T) { // Extrapolate from 1 sample to 10 total PRs // Create merge status for 10 PRs: 9 merged, 1 open - prStatuses := make([]PRMergeStatus, 10) - for i := 0; i < 9; i++ { - prStatuses[i] = PRMergeStatus{Merged: true, State: "MERGED"} + prStatuses := make([]PRSummaryInfo, 10) + for i := range 9 { + prStatuses[i] = PRSummaryInfo{Owner: "test", Repo: "test", Merged: true, State: "MERGED"} } - prStatuses[9] = PRMergeStatus{Merged: false, State: "OPEN"} - result := ExtrapolateFromSamples([]Breakdown{breakdown}, 10, 2, 0, 7, cfg, prStatuses) + prStatuses[9] = PRSummaryInfo{Owner: "test", Repo: "test", Merged: false, State: "OPEN"} + result := ExtrapolateFromSamples([]Breakdown{breakdown}, 10, 2, 0, 7, cfg, prStatuses, nil) if result.TotalPRs != 10 { t.Errorf("Expected TotalPRs=10, got %d", result.TotalPRs) @@ -1368,14 +1368,14 @@ func TestExtrapolateFromSamplesMultiple(t *testing.T) { // Extrapolate from 2 samples to 20 total PRs over 14 days // Create merge status for 20 PRs: 17 merged, 3 open - prStatuses := make([]PRMergeStatus, 20) - for i := 0; i < 17; i++ { - prStatuses[i] = PRMergeStatus{Merged: true, State: "MERGED"} + prStatuses := make([]PRSummaryInfo, 20) + for i := range 17 { + prStatuses[i] = PRSummaryInfo{Owner: "test", Repo: "test", Merged: true, State: "MERGED"} } for i := 17; i < 20; i++ { - prStatuses[i] = PRMergeStatus{Merged: false, State: "OPEN"} + prStatuses[i] = PRSummaryInfo{Owner: "test", Repo: "test", Merged: false, State: "OPEN"} } - result := ExtrapolateFromSamples(breakdowns, 20, 5, 3, 14, cfg, prStatuses) + result := ExtrapolateFromSamples(breakdowns, 20, 5, 3, 14, cfg, prStatuses, nil) if result.TotalPRs != 20 { t.Errorf("Expected TotalPRs=20, got %d", result.TotalPRs) @@ -1446,11 +1446,11 @@ func TestExtrapolateFromSamplesBotVsHuman(t *testing.T) { } // Create merge status for 10 PRs: all merged - prStatuses := make([]PRMergeStatus, 10) - for i := 0; i < 10; i++ { - prStatuses[i] = PRMergeStatus{Merged: true, State: "MERGED"} + prStatuses := make([]PRSummaryInfo, 10) + for i := range 10 { + prStatuses[i] = PRSummaryInfo{Owner: "test", Repo: "test", Merged: true, State: "MERGED"} } - result := ExtrapolateFromSamples(breakdowns, 10, 5, 0, 7, cfg, prStatuses) + result := ExtrapolateFromSamples(breakdowns, 10, 5, 0, 7, cfg, prStatuses, nil) // Should have both human and bot PR counts if result.HumanPRs <= 0 { @@ -1502,11 +1502,11 @@ func TestExtrapolateFromSamplesWasteCalculation(t *testing.T) { // Extrapolate over 7 days // Create merge status for 10 PRs: all merged - prStatuses := make([]PRMergeStatus, 10) - for i := 0; i < 10; i++ { - prStatuses[i] = PRMergeStatus{Merged: true, State: "MERGED"} + prStatuses := make([]PRSummaryInfo, 10) + for i := range 10 { + prStatuses[i] = PRSummaryInfo{Owner: "test", Repo: "test", Merged: true, State: "MERGED"} } - result := ExtrapolateFromSamples([]Breakdown{breakdown}, 10, 3, 0, 7, cfg, 
prStatuses) + result := ExtrapolateFromSamples([]Breakdown{breakdown}, 10, 3, 0, 7, cfg, prStatuses, nil) // Waste per week should be calculated if result.WasteHoursPerWeek <= 0 { @@ -1552,14 +1552,14 @@ func TestExtrapolateFromSamplesR2RSavings(t *testing.T) { } // Create merge status for 100 PRs: 95 merged, 5 open - prStatuses := make([]PRMergeStatus, 100) - for i := 0; i < 95; i++ { - prStatuses[i] = PRMergeStatus{Merged: true, State: "MERGED"} + prStatuses := make([]PRSummaryInfo, 100) + for i := range 95 { + prStatuses[i] = PRSummaryInfo{Owner: "test", Repo: "test", Merged: true, State: "MERGED"} } for i := 95; i < 100; i++ { - prStatuses[i] = PRMergeStatus{Merged: false, State: "OPEN"} + prStatuses[i] = PRSummaryInfo{Owner: "test", Repo: "test", Merged: false, State: "OPEN"} } - result := ExtrapolateFromSamples(breakdowns, 100, 10, 5, 30, cfg, prStatuses) + result := ExtrapolateFromSamples(breakdowns, 100, 10, 5, 30, cfg, prStatuses, nil) // R2R savings should be calculated // Savings formula: baseline waste - remodeled waste - subscription cost @@ -1597,14 +1597,14 @@ func TestExtrapolateFromSamplesOpenPRTracking(t *testing.T) { // Test with actual open PRs actualOpenPRs := 15 // Create merge status for 100 PRs: 85 merged, 15 open - prStatuses := make([]PRMergeStatus, 100) - for i := 0; i < 85; i++ { - prStatuses[i] = PRMergeStatus{Merged: true, State: "MERGED"} + prStatuses := make([]PRSummaryInfo, 100) + for i := range 85 { + prStatuses[i] = PRSummaryInfo{Owner: "test", Repo: "test", Merged: true, State: "MERGED"} } for i := 85; i < 100; i++ { - prStatuses[i] = PRMergeStatus{Merged: false, State: "OPEN"} + prStatuses[i] = PRSummaryInfo{Owner: "test", Repo: "test", Merged: false, State: "OPEN"} } - result := ExtrapolateFromSamples([]Breakdown{breakdown}, 100, 5, actualOpenPRs, 30, cfg, prStatuses) + result := ExtrapolateFromSamples([]Breakdown{breakdown}, 100, 5, actualOpenPRs, 30, cfg, prStatuses, nil) // Open PRs should match actual count (not extrapolated) if result.OpenPRs != actualOpenPRs { @@ -1641,11 +1641,11 @@ func TestExtrapolateFromSamplesParticipants(t *testing.T) { }, cfg) // Create merge status for 10 PRs: all merged - prStatuses := make([]PRMergeStatus, 10) - for i := 0; i < 10; i++ { - prStatuses[i] = PRMergeStatus{Merged: true, State: "MERGED"} + prStatuses := make([]PRSummaryInfo, 10) + for i := range 10 { + prStatuses[i] = PRSummaryInfo{Owner: "test", Repo: "test", Merged: true, State: "MERGED"} } - result := ExtrapolateFromSamples([]Breakdown{breakdown}, 10, 5, 0, 7, cfg, prStatuses) + result := ExtrapolateFromSamples([]Breakdown{breakdown}, 10, 5, 0, 7, cfg, prStatuses, nil) // Participant costs should be extrapolated if result.ParticipantReviewCost <= 0 { diff --git a/pkg/cost/extrapolate.go b/pkg/cost/extrapolate.go index 019d79d..c4b0f03 100644 --- a/pkg/cost/extrapolate.go +++ b/pkg/cost/extrapolate.go @@ -13,6 +13,8 @@ type PRMergeStatus struct { // ExtrapolatedBreakdown represents cost estimates extrapolated from a sample // of PRs to estimate total costs across a larger population. 
+// +//nolint:govet // fieldalignment: struct optimized for API clarity over memory layout type ExtrapolatedBreakdown struct { // Sample metadata TotalPRs int `json:"total_prs"` // Total number of PRs in the population @@ -22,6 +24,9 @@ type ExtrapolatedBreakdown struct { SuccessfulSamples int `json:"successful_samples"` // Number of samples that processed successfully UniqueAuthors int `json:"unique_authors"` // Number of unique PR authors (excluding bots) in sample TotalAuthors int `json:"total_authors"` // Total unique authors across all PRs (not just samples) + UniqueRepositories int `json:"unique_repositories"` // Number of unique repositories with PRs + PublicRepositories int `json:"public_repositories"` // Number of public repositories analyzed + PrivateRepositories int `json:"private_repositories"` // Number of private repositories analyzed WasteHoursPerWeek float64 `json:"waste_hours_per_week"` // Preventable hours wasted per week (organizational) WasteCostPerWeek float64 `json:"waste_cost_per_week"` // Preventable cost wasted per week (organizational) WasteHoursPerAuthorPerWeek float64 `json:"waste_hours_per_author_per_week"` // Preventable hours wasted per author per week @@ -109,6 +114,14 @@ type ExtrapolatedBreakdown struct { MergeRate float64 `json:"merge_rate"` // Percentage of PRs successfully merged (0-100) MergeRateNote string `json:"merge_rate_note"` // Explanation of what counts as merged/unmerged + // Grading (computed from metrics above) + EfficiencyGrade string `json:"efficiency_grade"` // Letter grade for development efficiency + EfficiencyMessage string `json:"efficiency_message"` // Description of efficiency grade + MergeVelocityGrade string `json:"merge_velocity_grade"` // Letter grade for merge velocity + MergeVelocityMessage string `json:"merge_velocity_message"` // Description of merge velocity grade + MergeRateGrade string `json:"merge_rate_grade"` // Letter grade for merge rate + MergeRateGradeMessage string `json:"merge_rate_grade_message"` // Description of merge rate grade + // R2R cost savings calculation UniqueNonBotUsers int `json:"unique_non_bot_users"` // Count of unique non-bot users (authors + participants) R2RSavings float64 `json:"r2r_savings"` // Annual savings if R2R cuts PR time to target merge time @@ -124,7 +137,8 @@ type ExtrapolatedBreakdown struct { // - actualOpenPRs: Count of actually open PRs (for tracking overhead) // - daysInPeriod: Number of days the sample covers (for per-week calculations) // - cfg: Configuration for hourly rate and hours per week calculation -// - prStatuses: Merge status for all PRs (for merge rate calculation) +// - prs: Slice of PR summaries (for merge rate and repository counting) +// - repoVisibility: Map of repository name to visibility status (nil = assume all public) // // Returns: // - ExtrapolatedBreakdown with averaged costs scaled to total population @@ -133,28 +147,53 @@ type ExtrapolatedBreakdown struct { // by the total PR count to estimate population-wide costs. 
// //nolint:revive,maintidx // Complex calculation function benefits from cohesion -func ExtrapolateFromSamples(breakdowns []Breakdown, totalPRs, totalAuthors, actualOpenPRs int, daysInPeriod int, cfg Config, prStatuses []PRMergeStatus) ExtrapolatedBreakdown { +func ExtrapolateFromSamples(breakdowns []Breakdown, totalPRs, totalAuthors, actualOpenPRs int, daysInPeriod int, cfg Config, prs []PRSummaryInfo, repoVisibility map[string]bool) ExtrapolatedBreakdown { + // Count unique repositories and their visibility + uniqueRepos := make(map[string]bool) + publicCount := 0 + privateCount := 0 + + for _, pr := range prs { + repoKey := pr.Owner + "/" + pr.Repo + if uniqueRepos[repoKey] { + continue // Already counted this repo + } + uniqueRepos[repoKey] = true + + // Check visibility - if repoVisibility is nil or doesn't have this repo, assume public + if repoVisibility != nil { + if isPrivate, ok := repoVisibility[pr.Repo]; ok && isPrivate { + privateCount++ + } else { + publicCount++ + } + } else { + publicCount++ + } + } + if len(breakdowns) == 0 { // Calculate merge rate even with no successful samples mergedCount := 0 - for _, status := range prStatuses { - if status.Merged { + for _, pr := range prs { + if pr.Merged { mergedCount++ } } mergeRate := 0.0 - if len(prStatuses) > 0 { - mergeRate = 100.0 * float64(mergedCount) / float64(len(prStatuses)) + if len(prs) > 0 { + mergeRate = 100.0 * float64(mergedCount) / float64(len(prs)) } return ExtrapolatedBreakdown{ - TotalPRs: totalPRs, - SampledPRs: 0, - SuccessfulSamples: 0, - MergedPRs: mergedCount, - UnmergedPRs: len(prStatuses) - mergedCount, - MergeRate: mergeRate, - MergeRateNote: "Recently modified PRs successfully merged", + TotalPRs: totalPRs, + SampledPRs: 0, + SuccessfulSamples: 0, + UniqueRepositories: len(uniqueRepos), + MergedPRs: mergedCount, + UnmergedPRs: len(prs) - mergedCount, + MergeRate: mergeRate, + MergeRateNote: "Recently modified PRs successfully merged", } } @@ -485,8 +524,8 @@ func ExtrapolateFromSamples(breakdowns []Breakdown, totalPRs, totalAuthors, actu // Calculate merge rate from all PRs (not just samples) mergedCount := 0 unmergedCount := 0 - for _, status := range prStatuses { - if status.Merged { + for _, pr := range prs { + if pr.Merged { mergedCount++ } else { unmergedCount++ @@ -494,16 +533,30 @@ func ExtrapolateFromSamples(breakdowns []Breakdown, totalPRs, totalAuthors, actu } mergeRate := 0.0 - if len(prStatuses) > 0 { - mergeRate = 100.0 * float64(mergedCount) / float64(len(prStatuses)) + if len(prs) > 0 { + mergeRate = 100.0 * float64(mergedCount) / float64(len(prs)) } slog.Info("Calculated merge rate from all PRs", - "total_prs", len(prStatuses), + "total_prs", len(prs), "merged", mergedCount, "unmerged", unmergedCount, "merge_rate_pct", mergeRate) + // Calculate efficiency percentage and grade + productiveCost := extAuthorTotal + extParticipantCost + efficiencyPct := 0.0 + if extTotalCost > 0 { + efficiencyPct = 100.0 * productiveCost / extTotalCost + } + efficiencyGrade, efficiencyMessage := EfficiencyGrade(efficiencyPct) + + // Calculate merge velocity grade + mergeVelocityGrade, mergeVelocityMessage := MergeVelocityGrade(avgPRDuration) + + // Calculate merge rate grade + mergeRateGrade, mergeRateGradeMessage := MergeRateGrade(mergeRate) + return ExtrapolatedBreakdown{ TotalPRs: totalPRs, HumanPRs: extHumanPRs, @@ -587,7 +640,17 @@ func ExtrapolateFromSamples(breakdowns []Breakdown, totalPRs, totalAuthors, actu MergeRate: mergeRate, MergeRateNote: "Recently modified PRs successfully merged", - 
UniqueNonBotUsers: uniqueUserCount, - R2RSavings: r2rSavings, + EfficiencyGrade: efficiencyGrade, + EfficiencyMessage: efficiencyMessage, + MergeVelocityGrade: mergeVelocityGrade, + MergeVelocityMessage: mergeVelocityMessage, + MergeRateGrade: mergeRateGrade, + MergeRateGradeMessage: mergeRateGradeMessage, + + UniqueNonBotUsers: uniqueUserCount, + UniqueRepositories: len(uniqueRepos), + PublicRepositories: publicCount, + PrivateRepositories: privateCount, + R2RSavings: r2rSavings, } } diff --git a/pkg/cost/grading.go b/pkg/cost/grading.go new file mode 100644 index 0000000..31977d4 --- /dev/null +++ b/pkg/cost/grading.go @@ -0,0 +1,63 @@ +package cost + +// EfficiencyGrade returns a letter grade and message based on efficiency percentage (MIT scale). +// Efficiency is the percentage of total cost that goes to productive work (author + participant) +// vs overhead/delays. +func EfficiencyGrade(efficiencyPct float64) (grade, message string) { + switch { + case efficiencyPct >= 97: + return "A+", "Impeccable" + case efficiencyPct >= 93: + return "A", "Excellent" + case efficiencyPct >= 90: + return "A-", "Nearly excellent" + case efficiencyPct >= 87: + return "B+", "Acceptable+" + case efficiencyPct >= 83: + return "B", "Acceptable" + case efficiencyPct >= 80: + return "B-", "Nearly acceptable" + case efficiencyPct >= 70: + return "C", "Average" + case efficiencyPct >= 60: + return "D", "Not good my friend." + default: + return "F", "Failing" + } +} + +// MergeVelocityGrade returns a grade based on average PR open time in hours. +// Faster merge times indicate better team velocity and lower coordination overhead. +func MergeVelocityGrade(avgOpenHours float64) (grade, message string) { + switch { + case avgOpenHours <= 4: // 4 hours + return "A+", "World-class velocity" + case avgOpenHours <= 24: // 1 day + return "A", "High-performing team" + case avgOpenHours <= 84: // 3.5 days + return "B", "Room for improvement" + case avgOpenHours <= 132: // 5.5 days + return "C", "Sluggish" + case avgOpenHours <= 168: // 7 days (1 week) + return "D", "Slow" + default: + return "F", "Failing" + } +} + +// MergeRateGrade returns a grade based on the percentage of PRs successfully merged. +// Higher merge rates indicate less wasted effort on abandoned work. +func MergeRateGrade(mergeRatePct float64) (grade, message string) { + switch { + case mergeRatePct > 90: + return "A", "Excellent" + case mergeRatePct > 80: + return "B", "Good" + case mergeRatePct > 70: + return "C", "Acceptable" + case mergeRatePct > 60: + return "D", "Low" + default: + return "F", "Poor" + } +} diff --git a/pkg/github/query.go b/pkg/github/query.go index b8c8861..43f3746 100644 --- a/pkg/github/query.go +++ b/pkg/github/query.go @@ -993,3 +993,145 @@ func CountOpenPRsInOrg(ctx context.Context, org, token string) (int, error) { return count, nil } + +// RepoVisibility contains repository name and privacy status. +type RepoVisibility struct { + Name string + IsPrivate bool +} + +// FetchOrgRepositoriesWithActivity fetches all repositories in an organization +// that had activity (pushes) in the specified time period, along with their privacy status. +// This is useful for determining which repositories were analyzed and whether they're public or private. 
+// +// Parameters: +// - ctx: Context for the API call +// - org: GitHub organization name +// - since: Only include repos with activity after this time +// - token: GitHub authentication token +// +// Returns: +// - Map of repository name to RepoVisibility struct +func FetchOrgRepositoriesWithActivity(ctx context.Context, org string, since time.Time, token string) (map[string]RepoVisibility, error) { + query := ` + query($org: String!, $cursor: String) { + organization(login: $org) { + repositories(first: 100, after: $cursor, orderBy: {field: PUSHED_AT, direction: DESC}) { + pageInfo { + hasNextPage + endCursor + } + nodes { + name + isPrivate + pushedAt + } + } + } + } + ` + + repos := make(map[string]RepoVisibility) + var cursor *string + + for { + variables := map[string]any{ + "org": org, + "cursor": cursor, + } + + payload := map[string]any{ + "query": query, + "variables": variables, + } + + bodyBytes, err := json.Marshal(payload) + if err != nil { + return nil, fmt.Errorf("failed to marshal request: %w", err) + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, "https://api.github.com/graphql", bytes.NewReader(bodyBytes)) + if err != nil { + return nil, fmt.Errorf("failed to create request: %w", err) + } + + req.Header.Set("Authorization", "Bearer "+token) + req.Header.Set("Content-Type", "application/json") + + client := &http.Client{Timeout: 30 * time.Second} + resp, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("failed to make request: %w", err) + } + + if resp.StatusCode != http.StatusOK { + _ = resp.Body.Close() //nolint:errcheck // best effort close on error path + return nil, fmt.Errorf("GraphQL request failed with status %d", resp.StatusCode) + } + + var result struct { + Data struct { + Organization struct { + Repositories struct { + PageInfo struct { + EndCursor string + HasNextPage bool + } + Nodes []struct { + PushedAt time.Time + Name string + IsPrivate bool + } + } + } + } + Errors []struct { + Message string + } + } + + if err := json.NewDecoder(resp.Body).Decode(&result); err != nil { + _ = resp.Body.Close() //nolint:errcheck // best effort close on error path + return nil, fmt.Errorf("failed to decode response: %w", err) + } + _ = resp.Body.Close() //nolint:errcheck // best effort close after successful read + + if len(result.Errors) > 0 { + return nil, fmt.Errorf("GraphQL error: %s", result.Errors[0].Message) + } + + // Process repositories and filter by activity date + foundRecentActivity := false + for _, node := range result.Data.Organization.Repositories.Nodes { + if node.PushedAt.Before(since) { + // Since repos are ordered by PUSHED_AT DESC, once we hit one before 'since', + // all remaining repos will also be before 'since' + break + } + foundRecentActivity = true + repos[node.Name] = RepoVisibility{ + Name: node.Name, + IsPrivate: node.IsPrivate, + } + } + + // If we found no recent activity on this page, we can stop + if !foundRecentActivity { + break + } + + // Continue to next page if there is one + if !result.Data.Organization.Repositories.PageInfo.HasNextPage { + break + } + + cursor = &result.Data.Organization.Repositories.PageInfo.EndCursor + } + + slog.Info("Fetched organization repositories with recent activity", + "org", org, + "since", since.Format(time.RFC3339), + "repo_count", len(repos)) + + return repos, nil +}
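To tie the new query helper to the extrapolation change, a short sketch (reviewer illustration, not part of this diff) of the visibility lookup that processOrgSample performs before calling ExtrapolateFromSamples. The function and field names come from this file; the module import path and the example package are assumptions.

package example // hypothetical package for illustration only

import (
	"context"
	"time"

	"github.com/codeGROOVE-dev/prcost/pkg/github" // assumed module path
)

// repoVisibilityMap reduces FetchOrgRepositoriesWithActivity results to the
// repo-name -> isPrivate map that ExtrapolateFromSamples now accepts,
// mirroring the conversion in processOrgSample above.
func repoVisibilityMap(ctx context.Context, org, token string, days int) map[string]bool {
	// Look back 2x the analysis window, matching processOrgSample's reposSince.
	since := time.Now().AddDate(0, 0, -days*2)
	data, err := github.FetchOrgRepositoriesWithActivity(ctx, org, since, token)
	if err != nil {
		return nil // nil => downstream code assumes every repository is public
	}
	visibility := make(map[string]bool, len(data))
	for name, repo := range data {
		visibility[name] = repo.IsPrivate
	}
	return visibility
}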