From c50d248e97d9516ed9c14885dc60d61f40b1d857 Mon Sep 17 00:00:00 2001 From: ben-fornefeld Date: Fri, 27 Mar 2026 17:23:01 -0700 Subject: [PATCH 01/20] feat: supabase auth users sync background runner in dashboard api --- packages/dashboard-api/internal/cfg/model.go | 8 + .../internal/supabaseauthusersync/config.go | 21 ++ .../supabaseauthusersync/processor.go | 107 +++++++ .../internal/supabaseauthusersync/runner.go | 69 ++++ .../supabaseauthusersync/runner_test.go | 300 ++++++++++++++++++ .../internal/supabaseauthusersync/store.go | 102 ++++++ packages/dashboard-api/main.go | 25 ++ ...ashboard_supabase_auth_user_sync_queue.sql | 111 +++++++ .../supabase_auth_user_sync/ack.sql | 3 + .../supabase_auth_user_sync/claim_batch.sql | 17 + .../supabase_auth_user_sync/dead_letter.sql | 8 + .../delete_public_user.sql | 3 + .../supabase_auth_user_sync/get_auth_user.sql | 4 + .../supabase_auth_user_sync/retry.sql | 8 + .../upsert_public_user.sql | 7 + packages/db/queries/ack.sql.go | 20 ++ packages/db/queries/claim_batch.sql.go | 73 +++++ packages/db/queries/dead_letter.sql.go | 30 ++ packages/db/queries/delete_public_user.sql.go | 22 ++ packages/db/queries/get_auth_user.sql.go | 25 ++ packages/db/queries/models.go | 13 + packages/db/queries/retry.sql.go | 33 ++ packages/db/queries/upsert_public_user.sql.go | 31 ++ 23 files changed, 1040 insertions(+) create mode 100644 packages/dashboard-api/internal/supabaseauthusersync/config.go create mode 100644 packages/dashboard-api/internal/supabaseauthusersync/processor.go create mode 100644 packages/dashboard-api/internal/supabaseauthusersync/runner.go create mode 100644 packages/dashboard-api/internal/supabaseauthusersync/runner_test.go create mode 100644 packages/dashboard-api/internal/supabaseauthusersync/store.go create mode 100644 packages/db/pkg/dashboard/migrations/20260328000000_dashboard_supabase_auth_user_sync_queue.sql create mode 100644 packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/ack.sql create mode 
100644 packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/claim_batch.sql create mode 100644 packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/dead_letter.sql create mode 100644 packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/delete_public_user.sql create mode 100644 packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/get_auth_user.sql create mode 100644 packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/retry.sql create mode 100644 packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/upsert_public_user.sql create mode 100644 packages/db/queries/ack.sql.go create mode 100644 packages/db/queries/claim_batch.sql.go create mode 100644 packages/db/queries/dead_letter.sql.go create mode 100644 packages/db/queries/delete_public_user.sql.go create mode 100644 packages/db/queries/get_auth_user.sql.go create mode 100644 packages/db/queries/retry.sql.go create mode 100644 packages/db/queries/upsert_public_user.sql.go diff --git a/packages/dashboard-api/internal/cfg/model.go b/packages/dashboard-api/internal/cfg/model.go index f0d9ad10a1..aafed3776c 100644 --- a/packages/dashboard-api/internal/cfg/model.go +++ b/packages/dashboard-api/internal/cfg/model.go @@ -1,6 +1,8 @@ package cfg import ( + "time" + "github.com/caarlos0/env/v11" ) @@ -12,6 +14,12 @@ type Config struct { AuthDBConnectionString string `env:"AUTH_DB_CONNECTION_STRING"` AuthDBReadReplicaConnectionString string `env:"AUTH_DB_READ_REPLICA_CONNECTION_STRING"` + + SupabaseAuthUserSyncEnabled bool `env:"SUPABASE_AUTH_USER_SYNC_ENABLED" envDefault:"false"` + SupabaseAuthUserSyncBatchSize int32 `env:"SUPABASE_AUTH_USER_SYNC_BATCH_SIZE" envDefault:"50"` + SupabaseAuthUserSyncPollInterval time.Duration `env:"SUPABASE_AUTH_USER_SYNC_POLL_INTERVAL" envDefault:"2s"` + SupabaseAuthUserSyncLockTimeout time.Duration `env:"SUPABASE_AUTH_USER_SYNC_LOCK_TIMEOUT" envDefault:"2m"` + SupabaseAuthUserSyncMaxAttempts int32 `env:"SUPABASE_AUTH_USER_SYNC_MAX_ATTEMPTS" 
envDefault:"20"` } func Parse() (Config, error) { diff --git a/packages/dashboard-api/internal/supabaseauthusersync/config.go b/packages/dashboard-api/internal/supabaseauthusersync/config.go new file mode 100644 index 0000000000..6883064a9e --- /dev/null +++ b/packages/dashboard-api/internal/supabaseauthusersync/config.go @@ -0,0 +1,21 @@ +package supabaseauthusersync + +import "time" + +type Config struct { + Enabled bool + BatchSize int32 + PollInterval time.Duration + LockTimeout time.Duration + MaxAttempts int32 +} + +func DefaultConfig() Config { + return Config{ + Enabled: false, + BatchSize: 50, + PollInterval: 2 * time.Second, + LockTimeout: 2 * time.Minute, + MaxAttempts: 20, + } +} diff --git a/packages/dashboard-api/internal/supabaseauthusersync/processor.go b/packages/dashboard-api/internal/supabaseauthusersync/processor.go new file mode 100644 index 0000000000..dfd8ae79a8 --- /dev/null +++ b/packages/dashboard-api/internal/supabaseauthusersync/processor.go @@ -0,0 +1,107 @@ +package supabaseauthusersync + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/jackc/pgx/v5" + "go.uber.org/zap" + + "github.com/e2b-dev/infra/packages/shared/pkg/logger" +) + +type Processor struct { + store *Store + maxAttempts int32 + l logger.Logger +} + +func NewProcessor(store *Store, maxAttempts int32, l logger.Logger) *Processor { + return &Processor{ + store: store, + maxAttempts: maxAttempts, + l: l, + } +} + +func (p *Processor) Process(ctx context.Context, item QueueItem) { + err := p.reconcile(ctx, item) + + if err == nil { + if ackErr := p.store.Ack(ctx, item.ID); ackErr != nil { + p.l.Error(ctx, "failed to ack queue item", + zap.Int64("queue_item_id", item.ID), + zap.String("user_id", item.UserID.String()), + zap.Error(ackErr), + ) + } + + return + } + + p.l.Warn(ctx, "failed to process queue item", + zap.Int64("queue_item_id", item.ID), + zap.String("user_id", item.UserID.String()), + zap.Int32("attempt", item.AttemptCount), + zap.Error(err), + ) + 
+ if item.AttemptCount >= p.maxAttempts { + if dlErr := p.store.DeadLetter(ctx, item.ID, err.Error()); dlErr != nil { + p.l.Error(ctx, "failed to dead-letter queue item", + zap.Int64("queue_item_id", item.ID), + zap.Error(dlErr), + ) + } + + return + } + + backoff := retryBackoff(item.AttemptCount) + + if retryErr := p.store.Retry(ctx, item.ID, backoff, err.Error()); retryErr != nil { + p.l.Error(ctx, "failed to retry queue item", + zap.Int64("queue_item_id", item.ID), + zap.Error(retryErr), + ) + } +} + +func (p *Processor) reconcile(ctx context.Context, item QueueItem) error { + authUser, err := p.store.GetAuthUser(ctx, item.UserID) + + if errors.Is(err, pgx.ErrNoRows) { + if delErr := p.store.DeletePublicUser(ctx, item.UserID); delErr != nil { + return fmt.Errorf("delete public.users %s: %w", item.UserID, delErr) + } + + return nil + } + + if err != nil { + return fmt.Errorf("get auth.users %s: %w", item.UserID, err) + } + + if err = p.store.UpsertPublicUser(ctx, authUser.ID, authUser.Email); err != nil { + return fmt.Errorf("upsert public.users %s: %w", authUser.ID, err) + } + + return nil +} + +func retryBackoff(attempt int32) time.Duration { + switch { + case attempt <= 1: + return 5 * time.Second + case attempt <= 3: + return 30 * time.Second + case attempt <= 6: + return 2 * time.Minute + case attempt <= 10: + return 5 * time.Minute + default: + return 15 * time.Minute + } +} diff --git a/packages/dashboard-api/internal/supabaseauthusersync/runner.go b/packages/dashboard-api/internal/supabaseauthusersync/runner.go new file mode 100644 index 0000000000..39de50196c --- /dev/null +++ b/packages/dashboard-api/internal/supabaseauthusersync/runner.go @@ -0,0 +1,69 @@ +package supabaseauthusersync + +import ( + "context" + "time" + + "go.uber.org/zap" + + "github.com/e2b-dev/infra/packages/shared/pkg/logger" +) + +type Runner struct { + cfg Config + store *Store + processor *Processor + lockOwner string + l logger.Logger +} + +func NewRunner(cfg Config, store 
*Store, lockOwner string, l logger.Logger) *Runner { + return &Runner{ + cfg: cfg, + store: store, + processor: NewProcessor(store, cfg.MaxAttempts, l), + lockOwner: lockOwner, + l: l, + } +} + +func (r *Runner) Run(ctx context.Context) error { + r.l.Info(ctx, "starting supabase auth user sync worker", + zap.String("lock_owner", r.lockOwner), + zap.Duration("poll_interval", r.cfg.PollInterval), + zap.Int32("batch_size", r.cfg.BatchSize), + ) + + ticker := time.NewTicker(r.cfg.PollInterval) + defer ticker.Stop() + + for { + select { + case <-ctx.Done(): + r.l.Info(ctx, "stopping supabase auth user sync worker") + + return ctx.Err() + case <-ticker.C: + r.poll(ctx) + } + } +} + +func (r *Runner) poll(ctx context.Context) { + items, err := r.store.ClaimBatch(ctx, r.lockOwner, r.cfg.LockTimeout, r.cfg.BatchSize) + if err != nil { + r.l.Error(ctx, "failed to claim queue batch", zap.Error(err)) + + return + } + + if len(items) == 0 { + return + } + + r.l.Debug(ctx, "claimed queue batch", zap.Int("count", len(items))) + + for _, item := range items { + r.processor.Process(ctx, item) + } +} diff --git a/packages/dashboard-api/internal/supabaseauthusersync/runner_test.go b/packages/dashboard-api/internal/supabaseauthusersync/runner_test.go new file mode 100644 index 0000000000..79fe007842 --- /dev/null +++ b/packages/dashboard-api/internal/supabaseauthusersync/runner_test.go @@ -0,0 +1,300 @@ +package supabaseauthusersync + +import ( + "os/exec" + "path/filepath" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/google/uuid" + "github.com/jackc/pgx/v5" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/e2b-dev/infra/packages/db/pkg/testutils" + "github.com/e2b-dev/infra/packages/shared/pkg/logger" +) + +func setupTestDB(t *testing.T) *testutils.Database { + t.Helper() + + db := testutils.SetupDatabase(t) + + repoRoot := gitRoot(t) + migrationSQL := readFile(t, filepath.Join( + repoRoot, + "packages", "db", "pkg", 
"dashboard", "migrations", + "20260328000000_dashboard_supabase_auth_user_sync_queue.sql", + )) + + upSQL := extractGooseUp(migrationSQL) + err := db.AuthDb.TestsRawSQL(t.Context(), upSQL) + require.NoError(t, err, "failed to apply dashboard auth sync migration") + + return db +} + +func gitRoot(t *testing.T) string { + t.Helper() + + cmd := exec.CommandContext(t.Context(), "git", "rev-parse", "--show-toplevel") + output, err := cmd.Output() + require.NoError(t, err) + + return strings.TrimSpace(string(output)) +} + +func readFile(t *testing.T, path string) string { + t.Helper() + + cmd := exec.CommandContext(t.Context(), "cat", path) + output, err := cmd.Output() + require.NoError(t, err) + + return string(output) +} + +func extractGooseUp(sql string) string { + parts := strings.SplitN(sql, "-- +goose Down", 2) + up := parts[0] + up = strings.ReplaceAll(up, "-- +goose Up", "") + up = strings.ReplaceAll(up, "-- +goose StatementBegin", "") + up = strings.ReplaceAll(up, "-- +goose StatementEnd", "") + + return up +} + +func insertAuthUser(t *testing.T, db *testutils.Database, userID uuid.UUID, email string) { + t.Helper() + err := db.AuthDb.TestsRawSQL(t.Context(), + "INSERT INTO auth.users (id, email) VALUES ($1, $2)", userID, email) + require.NoError(t, err) +} + +func updateAuthUserEmail(t *testing.T, db *testutils.Database, userID uuid.UUID, email string) { + t.Helper() + err := db.AuthDb.TestsRawSQL(t.Context(), + "UPDATE auth.users SET email = $1 WHERE id = $2", email, userID) + require.NoError(t, err) +} + +func deleteAuthUser(t *testing.T, db *testutils.Database, userID uuid.UUID) { + t.Helper() + err := db.AuthDb.TestsRawSQL(t.Context(), + "DELETE FROM auth.users WHERE id = $1", userID) + require.NoError(t, err) +} + +func getPublicUserEmail(t *testing.T, db *testutils.Database, userID uuid.UUID) (string, bool) { + t.Helper() + + var email string + var found bool + + err := db.AuthDb.TestsRawSQLQuery(t.Context(), + "SELECT email FROM public.users WHERE id = 
$1", + func(rows pgx.Rows) error { + if rows.Next() { + found = true + return rows.Scan(&email) + } + return nil + }, + userID, + ) + require.NoError(t, err) + + return email, found +} + +func queueDepth(t *testing.T, db *testutils.Database) int { + t.Helper() + + var count int + + err := db.AuthDb.TestsRawSQLQuery(t.Context(), + "SELECT count(*) FROM auth.user_sync_queue WHERE dead_lettered_at IS NULL", + func(rows pgx.Rows) error { + if rows.Next() { + return rows.Scan(&count) + } + return nil + }, + ) + require.NoError(t, err) + + return count +} + +func TestInsertAuthUserCreatesQueueRow(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + + db := setupTestDB(t) + + userID := uuid.New() + insertAuthUser(t, db, userID, "test@example.com") + + depth := queueDepth(t, db) + assert.Equal(t, 1, depth) +} + +func TestProcessorReconciles_Insert(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + + db := setupTestDB(t) + store := NewStore(db.SqlcClient.Queries) + l := logger.NewNopLogger() + proc := NewProcessor(store, 5, l) + + userID := uuid.New() + insertAuthUser(t, db, userID, "alice@example.com") + + items, err := store.ClaimBatch(t.Context(), "test-worker", 2*time.Minute, 10) + require.NoError(t, err) + require.Len(t, items, 1) + + proc.Process(t.Context(), items[0]) + + email, found := getPublicUserEmail(t, db, userID) + assert.True(t, found) + assert.Equal(t, "alice@example.com", email) + + assert.Equal(t, 0, queueDepth(t, db)) +} + +func TestProcessorReconciles_UpdateEmail(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + + db := setupTestDB(t) + store := NewStore(db.SqlcClient.Queries) + l := logger.NewNopLogger() + proc := NewProcessor(store, 5, l) + + userID := uuid.New() + insertAuthUser(t, db, userID, "old@example.com") + + items, err := store.ClaimBatch(t.Context(), "test-worker", 2*time.Minute, 10) + require.NoError(t, err) + proc.Process(t.Context(), items[0]) + 
+ updateAuthUserEmail(t, db, userID, "new@example.com") + + items, err = store.ClaimBatch(t.Context(), "test-worker", 2*time.Minute, 10) + require.NoError(t, err) + require.Len(t, items, 1) + proc.Process(t.Context(), items[0]) + + email, found := getPublicUserEmail(t, db, userID) + assert.True(t, found) + assert.Equal(t, "new@example.com", email) +} + +func TestProcessorReconciles_Delete(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + + db := setupTestDB(t) + store := NewStore(db.SqlcClient.Queries) + l := logger.NewNopLogger() + proc := NewProcessor(store, 5, l) + + userID := uuid.New() + insertAuthUser(t, db, userID, "doomed@example.com") + + items, err := store.ClaimBatch(t.Context(), "test-worker", 2*time.Minute, 10) + require.NoError(t, err) + proc.Process(t.Context(), items[0]) + + _, found := getPublicUserEmail(t, db, userID) + require.True(t, found) + + deleteAuthUser(t, db, userID) + + items, err = store.ClaimBatch(t.Context(), "test-worker", 2*time.Minute, 10) + require.NoError(t, err) + require.Len(t, items, 1) + proc.Process(t.Context(), items[0]) + + _, found = getPublicUserEmail(t, db, userID) + assert.False(t, found) +} + +func TestDuplicateQueueRowsConverge(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + + db := setupTestDB(t) + store := NewStore(db.SqlcClient.Queries) + l := logger.NewNopLogger() + proc := NewProcessor(store, 5, l) + + userID := uuid.New() + insertAuthUser(t, db, userID, "dup@example.com") + + err := db.AuthDb.TestsRawSQL(t.Context(), + "INSERT INTO auth.user_sync_queue (user_id, operation) VALUES ($1, 'upsert')", + userID) + require.NoError(t, err) + + items, err := store.ClaimBatch(t.Context(), "test-worker", 2*time.Minute, 10) + require.NoError(t, err) + assert.GreaterOrEqual(t, len(items), 2) + + for _, item := range items { + proc.Process(t.Context(), item) + } + + email, found := getPublicUserEmail(t, db, userID) + assert.True(t, found) + assert.Equal(t, 
"dup@example.com", email) + assert.Equal(t, 0, queueDepth(t, db)) +} + +func TestMultiInstanceClaimNoDoubleProcessing(t *testing.T) { + if testing.Short() { + t.Skip("skipping integration test") + } + + db := setupTestDB(t) + + for i := range 10 { + userID := uuid.New() + insertAuthUser(t, db, userID, "user"+string(rune('a'+i))+"@example.com") + } + + store1 := NewStore(db.SqlcClient.Queries) + store2 := NewStore(db.SqlcClient.Queries) + + var claimed1, claimed2 atomic.Int32 + + ctx := t.Context() + + items1, err := store1.ClaimBatch(ctx, "worker-1", 2*time.Minute, 10) + require.NoError(t, err) + claimed1.Store(int32(len(items1))) + + items2, err := store2.ClaimBatch(ctx, "worker-2", 2*time.Minute, 10) + require.NoError(t, err) + claimed2.Store(int32(len(items2))) + + total := claimed1.Load() + claimed2.Load() + assert.Equal(t, int32(10), total, "all items should be claimed exactly once across both workers") + + ids := make(map[int64]bool) + for _, item := range items1 { + ids[item.ID] = true + } + for _, item := range items2 { + assert.False(t, ids[item.ID], "item %d claimed by both workers", item.ID) + } +} diff --git a/packages/dashboard-api/internal/supabaseauthusersync/store.go b/packages/dashboard-api/internal/supabaseauthusersync/store.go new file mode 100644 index 0000000000..019e4bf6c6 --- /dev/null +++ b/packages/dashboard-api/internal/supabaseauthusersync/store.go @@ -0,0 +1,102 @@ +package supabaseauthusersync + +import ( + "context" + "time" + + "github.com/google/uuid" + "github.com/jackc/pgx/v5/pgtype" + + "github.com/e2b-dev/infra/packages/db/queries" +) + +type QueueItem struct { + ID int64 + UserID uuid.UUID + Operation string + CreatedAt time.Time + AttemptCount int32 +} + +type AuthUser struct { + ID uuid.UUID + Email string +} + +type Store struct { + q *queries.Queries +} + +func NewStore(q *queries.Queries) *Store { + return &Store{q: q} +} + +func (s *Store) ClaimBatch(ctx context.Context, lockOwner string, lockTimeout time.Duration, 
batchSize int32) ([]QueueItem, error) { + rows, err := s.q.ClaimUserSyncQueueBatch(ctx, queries.ClaimUserSyncQueueBatchParams{ + LockOwner: lockOwner, + LockTimeout: durationToInterval(lockTimeout), + BatchSize: batchSize, + }) + if err != nil { + return nil, err + } + + items := make([]QueueItem, len(rows)) + for i, r := range rows { + items[i] = QueueItem{ + ID: r.ID, + UserID: r.UserID, + Operation: r.Operation, + CreatedAt: r.CreatedAt, + AttemptCount: r.AttemptCount, + } + } + + return items, nil +} + +func (s *Store) Ack(ctx context.Context, id int64) error { + return s.q.AckUserSyncQueueItem(ctx, id) +} + +func (s *Store) Retry(ctx context.Context, id int64, backoff time.Duration, lastError string) error { + return s.q.RetryUserSyncQueueItem(ctx, queries.RetryUserSyncQueueItemParams{ + ID: id, + Backoff: durationToInterval(backoff), + LastError: lastError, + }) +} + +func (s *Store) DeadLetter(ctx context.Context, id int64, lastError string) error { + return s.q.DeadLetterUserSyncQueueItem(ctx, queries.DeadLetterUserSyncQueueItemParams{ + ID: id, + LastError: lastError, + }) +} + +func (s *Store) GetAuthUser(ctx context.Context, userID uuid.UUID) (*AuthUser, error) { + row, err := s.q.GetAuthUserByID(ctx, userID) + if err != nil { + return nil, err + } + + return &AuthUser{ID: row.ID, Email: row.Email}, nil +} + +func (s *Store) UpsertPublicUser(ctx context.Context, id uuid.UUID, email string) error { + return s.q.UpsertPublicUser(ctx, queries.UpsertPublicUserParams{ + ID: id, + Email: email, + }) +} + +func (s *Store) DeletePublicUser(ctx context.Context, id uuid.UUID) error { + return s.q.DeletePublicUser(ctx, id) +} + +func durationToInterval(d time.Duration) pgtype.Interval { + return pgtype.Interval{ + Microseconds: d.Microseconds(), + Valid: true, + } +} diff --git a/packages/dashboard-api/main.go b/packages/dashboard-api/main.go index 4b40634473..0f0bac4ddb 100644 --- a/packages/dashboard-api/main.go +++ b/packages/dashboard-api/main.go @@ -30,6 +30,7 
@@ import ( "github.com/e2b-dev/infra/packages/dashboard-api/internal/api" "github.com/e2b-dev/infra/packages/dashboard-api/internal/cfg" "github.com/e2b-dev/infra/packages/dashboard-api/internal/handlers" + "github.com/e2b-dev/infra/packages/dashboard-api/internal/supabaseauthusersync" sqlcdb "github.com/e2b-dev/infra/packages/db/client" authdb "github.com/e2b-dev/infra/packages/db/pkg/auth" "github.com/e2b-dev/infra/packages/db/pkg/pool" @@ -229,6 +230,30 @@ func run() int { wg := sync.WaitGroup{} + if config.SupabaseAuthUserSyncEnabled { + workerLogger := l.With(zap.String("worker", "supabase_auth_user_sync")) + syncStore := supabaseauthusersync.NewStore(db.Queries) + syncRunner := supabaseauthusersync.NewRunner( + supabaseauthusersync.Config{ + Enabled: true, + BatchSize: config.SupabaseAuthUserSyncBatchSize, + PollInterval: config.SupabaseAuthUserSyncPollInterval, + LockTimeout: config.SupabaseAuthUserSyncLockTimeout, + MaxAttempts: config.SupabaseAuthUserSyncMaxAttempts, + }, + syncStore, + serviceInstanceID, + workerLogger, + ) + + wg.Go(func() { + if err := syncRunner.Run(signalCtx); err != nil && !errors.Is(err, context.Canceled) { + l.Error(ctx, "supabase auth user sync worker error", zap.Error(err)) + errorCode.Add(1) + } + }) + } + wg.Go(func() { <-signalCtx.Done() l.Info(ctx, "Shutting down dashboard-api service...") diff --git a/packages/db/pkg/dashboard/migrations/20260328000000_dashboard_supabase_auth_user_sync_queue.sql b/packages/db/pkg/dashboard/migrations/20260328000000_dashboard_supabase_auth_user_sync_queue.sql new file mode 100644 index 0000000000..90ceda9d06 --- /dev/null +++ b/packages/db/pkg/dashboard/migrations/20260328000000_dashboard_supabase_auth_user_sync_queue.sql @@ -0,0 +1,111 @@ +-- +goose Up +-- +goose StatementBegin + +CREATE TABLE auth.user_sync_queue ( + id BIGSERIAL PRIMARY KEY, + user_id UUID NOT NULL, + operation TEXT NOT NULL CHECK (operation IN ('upsert', 'delete')), + created_at TIMESTAMPTZ NOT NULL DEFAULT now(), + 
next_attempt_at TIMESTAMPTZ NOT NULL DEFAULT now(), + locked_at TIMESTAMPTZ NULL, + lock_owner TEXT NULL, + attempt_count INT NOT NULL DEFAULT 0, + last_error TEXT NULL, + dead_lettered_at TIMESTAMPTZ NULL +); + +CREATE INDEX auth_user_sync_queue_pending_idx + ON auth.user_sync_queue (id) + WHERE dead_lettered_at IS NULL AND locked_at IS NULL; + +CREATE INDEX auth_user_sync_queue_user_idx + ON auth.user_sync_queue (user_id); + +GRANT INSERT ON auth.user_sync_queue TO trigger_user; +GRANT USAGE, SELECT ON SEQUENCE auth.user_sync_queue_id_seq TO trigger_user; + +-- Replace direct insert-sync with enqueue +CREATE OR REPLACE FUNCTION public.sync_insert_auth_users_to_public_users_trigger() RETURNS TRIGGER +LANGUAGE plpgsql +AS $func$ +BEGIN + INSERT INTO auth.user_sync_queue (user_id, operation) + VALUES (NEW.id, 'upsert'); + RETURN NEW; +END; +$func$ SECURITY DEFINER SET search_path = public; + +-- Replace direct update-sync with enqueue (only when mirrored fields change) +CREATE OR REPLACE FUNCTION public.sync_update_auth_users_to_public_users_trigger() RETURNS TRIGGER +LANGUAGE plpgsql +AS $func$ +BEGIN + IF OLD.email IS DISTINCT FROM NEW.email THEN + INSERT INTO auth.user_sync_queue (user_id, operation) + VALUES (NEW.id, 'upsert'); + END IF; + RETURN NEW; +END; +$func$ SECURITY DEFINER SET search_path = public; + +-- Replace direct delete-sync with enqueue +CREATE OR REPLACE FUNCTION public.sync_delete_auth_users_to_public_users_trigger() RETURNS TRIGGER +LANGUAGE plpgsql +AS $func$ +BEGIN + INSERT INTO auth.user_sync_queue (user_id, operation) + VALUES (OLD.id, 'delete'); + RETURN OLD; +END; +$func$ SECURITY DEFINER SET search_path = public; + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin + +-- Restore direct insert-sync +CREATE OR REPLACE FUNCTION public.sync_insert_auth_users_to_public_users_trigger() RETURNS TRIGGER +LANGUAGE plpgsql +AS $func$ +BEGIN + INSERT INTO public.users (id, email) + VALUES (NEW.id, NEW.email); + RETURN NEW; +END; 
+$func$ SECURITY DEFINER SET search_path = public; + +-- Restore direct update-sync +CREATE OR REPLACE FUNCTION public.sync_update_auth_users_to_public_users_trigger() RETURNS TRIGGER +LANGUAGE plpgsql +AS $func$ +BEGIN + UPDATE public.users + SET email = NEW.email, + updated_at = now() + WHERE id = NEW.id; + + IF NOT FOUND THEN + RAISE EXCEPTION 'User with id % does not exist in public.users', NEW.id; + END IF; + + RETURN NEW; +END; +$func$ SECURITY DEFINER SET search_path = public; + +-- Restore direct delete-sync +CREATE OR REPLACE FUNCTION public.sync_delete_auth_users_to_public_users_trigger() RETURNS TRIGGER +LANGUAGE plpgsql +AS $func$ +BEGIN + DELETE FROM public.users WHERE id = OLD.id; + RETURN OLD; +END; +$func$ SECURITY DEFINER SET search_path = public; + +REVOKE INSERT ON auth.user_sync_queue FROM trigger_user; +REVOKE USAGE, SELECT ON SEQUENCE auth.user_sync_queue_id_seq FROM trigger_user; + +DROP TABLE auth.user_sync_queue; + +-- +goose StatementEnd diff --git a/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/ack.sql b/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/ack.sql new file mode 100644 index 0000000000..f2fe8ed889 --- /dev/null +++ b/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/ack.sql @@ -0,0 +1,3 @@ +-- name: AckUserSyncQueueItem :exec +DELETE FROM auth.user_sync_queue +WHERE id = sqlc.arg(id)::bigint; diff --git a/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/claim_batch.sql b/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/claim_batch.sql new file mode 100644 index 0000000000..af8c29aeaf --- /dev/null +++ b/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/claim_batch.sql @@ -0,0 +1,17 @@ +-- name: ClaimUserSyncQueueBatch :many +UPDATE auth.user_sync_queue +SET + locked_at = now(), + lock_owner = sqlc.arg(lock_owner)::text, + attempt_count = attempt_count + 1 +WHERE id IN ( + SELECT id + FROM auth.user_sync_queue + WHERE dead_lettered_at IS NULL + AND 
next_attempt_at <= now() + AND (locked_at IS NULL OR locked_at < now() - sqlc.arg(lock_timeout)::interval) + ORDER BY id + FOR UPDATE SKIP LOCKED + LIMIT sqlc.arg(batch_size)::int +) +RETURNING id, user_id, operation, created_at, attempt_count; diff --git a/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/dead_letter.sql b/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/dead_letter.sql new file mode 100644 index 0000000000..fd4d6a87e9 --- /dev/null +++ b/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/dead_letter.sql @@ -0,0 +1,8 @@ +-- name: DeadLetterUserSyncQueueItem :exec +UPDATE auth.user_sync_queue +SET + locked_at = NULL, + lock_owner = NULL, + dead_lettered_at = now(), + last_error = sqlc.arg(last_error)::text +WHERE id = sqlc.arg(id)::bigint; diff --git a/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/delete_public_user.sql b/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/delete_public_user.sql new file mode 100644 index 0000000000..492f1051cd --- /dev/null +++ b/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/delete_public_user.sql @@ -0,0 +1,3 @@ +-- name: DeletePublicUser :exec +DELETE FROM public.users +WHERE id = sqlc.arg(id)::uuid; diff --git a/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/get_auth_user.sql b/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/get_auth_user.sql new file mode 100644 index 0000000000..414b3fe3cf --- /dev/null +++ b/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/get_auth_user.sql @@ -0,0 +1,4 @@ +-- name: GetAuthUserByID :one +SELECT id, email +FROM auth.users +WHERE id = sqlc.arg(user_id)::uuid; diff --git a/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/retry.sql b/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/retry.sql new file mode 100644 index 0000000000..cdc21a34d1 --- /dev/null +++ b/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/retry.sql @@ 
-0,0 +1,8 @@ +-- name: RetryUserSyncQueueItem :exec +UPDATE auth.user_sync_queue +SET + locked_at = NULL, + lock_owner = NULL, + next_attempt_at = now() + sqlc.arg(backoff)::interval, + last_error = sqlc.arg(last_error)::text +WHERE id = sqlc.arg(id)::bigint; diff --git a/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/upsert_public_user.sql b/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/upsert_public_user.sql new file mode 100644 index 0000000000..ebabd969e6 --- /dev/null +++ b/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/upsert_public_user.sql @@ -0,0 +1,7 @@ +-- name: UpsertPublicUser :exec +INSERT INTO public.users (id, email) +VALUES (sqlc.arg(id)::uuid, sqlc.arg(email)::text) +ON CONFLICT (id) +DO UPDATE SET + email = EXCLUDED.email, + updated_at = now(); diff --git a/packages/db/queries/ack.sql.go b/packages/db/queries/ack.sql.go new file mode 100644 index 0000000000..66a517f0a5 --- /dev/null +++ b/packages/db/queries/ack.sql.go @@ -0,0 +1,20 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.29.0 +// source: ack.sql + +package queries + +import ( + "context" +) + +const ackUserSyncQueueItem = `-- name: AckUserSyncQueueItem :exec +DELETE FROM auth.user_sync_queue +WHERE id = $1::bigint +` + +func (q *Queries) AckUserSyncQueueItem(ctx context.Context, id int64) error { + _, err := q.db.Exec(ctx, ackUserSyncQueueItem, id) + return err +} diff --git a/packages/db/queries/claim_batch.sql.go b/packages/db/queries/claim_batch.sql.go new file mode 100644 index 0000000000..f6797712b5 --- /dev/null +++ b/packages/db/queries/claim_batch.sql.go @@ -0,0 +1,73 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.29.0 +// source: claim_batch.sql + +package queries + +import ( + "context" + "time" + + "github.com/google/uuid" + "github.com/jackc/pgx/v5/pgtype" +) + +const claimUserSyncQueueBatch = `-- name: ClaimUserSyncQueueBatch :many +UPDATE auth.user_sync_queue +SET + locked_at = now(), + lock_owner = $1::text, + attempt_count = attempt_count + 1 +WHERE id IN ( + SELECT id + FROM auth.user_sync_queue + WHERE dead_lettered_at IS NULL + AND next_attempt_at <= now() + AND (locked_at IS NULL OR locked_at < now() - $2::interval) + ORDER BY id + FOR UPDATE SKIP LOCKED + LIMIT $3::int +) +RETURNING id, user_id, operation, created_at, attempt_count +` + +type ClaimUserSyncQueueBatchParams struct { + LockOwner string + LockTimeout pgtype.Interval + BatchSize int32 +} + +type ClaimUserSyncQueueBatchRow struct { + ID int64 + UserID uuid.UUID + Operation string + CreatedAt time.Time + AttemptCount int32 +} + +func (q *Queries) ClaimUserSyncQueueBatch(ctx context.Context, arg ClaimUserSyncQueueBatchParams) ([]ClaimUserSyncQueueBatchRow, error) { + rows, err := q.db.Query(ctx, claimUserSyncQueueBatch, arg.LockOwner, arg.LockTimeout, arg.BatchSize) + if err != nil { + return nil, err + } + defer rows.Close() + var items []ClaimUserSyncQueueBatchRow + for rows.Next() { + var i ClaimUserSyncQueueBatchRow + if err := rows.Scan( + &i.ID, + &i.UserID, + &i.Operation, + &i.CreatedAt, + &i.AttemptCount, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} diff --git a/packages/db/queries/dead_letter.sql.go b/packages/db/queries/dead_letter.sql.go new file mode 100644 index 0000000000..3df0f8c511 --- /dev/null +++ b/packages/db/queries/dead_letter.sql.go @@ -0,0 +1,30 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.29.0 +// source: dead_letter.sql + +package queries + +import ( + "context" +) + +const deadLetterUserSyncQueueItem = `-- name: DeadLetterUserSyncQueueItem :exec +UPDATE auth.user_sync_queue +SET + locked_at = NULL, + lock_owner = NULL, + dead_lettered_at = now(), + last_error = $1::text +WHERE id = $2::bigint +` + +type DeadLetterUserSyncQueueItemParams struct { + LastError string + ID int64 +} + +func (q *Queries) DeadLetterUserSyncQueueItem(ctx context.Context, arg DeadLetterUserSyncQueueItemParams) error { + _, err := q.db.Exec(ctx, deadLetterUserSyncQueueItem, arg.LastError, arg.ID) + return err +} diff --git a/packages/db/queries/delete_public_user.sql.go b/packages/db/queries/delete_public_user.sql.go new file mode 100644 index 0000000000..585d8c977c --- /dev/null +++ b/packages/db/queries/delete_public_user.sql.go @@ -0,0 +1,22 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.29.0 +// source: delete_public_user.sql + +package queries + +import ( + "context" + + "github.com/google/uuid" +) + +const deletePublicUser = `-- name: DeletePublicUser :exec +DELETE FROM public.users +WHERE id = $1::uuid +` + +func (q *Queries) DeletePublicUser(ctx context.Context, id uuid.UUID) error { + _, err := q.db.Exec(ctx, deletePublicUser, id) + return err +} diff --git a/packages/db/queries/get_auth_user.sql.go b/packages/db/queries/get_auth_user.sql.go new file mode 100644 index 0000000000..4b7c341df2 --- /dev/null +++ b/packages/db/queries/get_auth_user.sql.go @@ -0,0 +1,25 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.29.0 +// source: get_auth_user.sql + +package queries + +import ( + "context" + + "github.com/google/uuid" +) + +const getAuthUserByID = `-- name: GetAuthUserByID :one +SELECT id, email +FROM auth.users +WHERE id = $1::uuid +` + +func (q *Queries) GetAuthUserByID(ctx context.Context, userID uuid.UUID) (AuthUser, error) { + row := q.db.QueryRow(ctx, getAuthUserByID, userID) + var i AuthUser + err := row.Scan(&i.ID, &i.Email) + return i, err +} diff --git a/packages/db/queries/models.go b/packages/db/queries/models.go index 6c960a7a59..9f0b0cf689 100644 --- a/packages/db/queries/models.go +++ b/packages/db/queries/models.go @@ -54,6 +54,19 @@ type AuthUser struct { Email string } +type AuthUserSyncQueue struct { + ID int64 + UserID uuid.UUID + Operation string + CreatedAt time.Time + NextAttemptAt time.Time + LockedAt *time.Time + LockOwner *string + AttemptCount int32 + LastError *string + DeadLetteredAt *time.Time +} + type BillingSandboxLog struct { SandboxID string EnvID string diff --git a/packages/db/queries/retry.sql.go b/packages/db/queries/retry.sql.go new file mode 100644 index 0000000000..ccbb7da45b --- /dev/null +++ b/packages/db/queries/retry.sql.go @@ -0,0 +1,33 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.29.0 +// source: retry.sql + +package queries + +import ( + "context" + + "github.com/jackc/pgx/v5/pgtype" +) + +const retryUserSyncQueueItem = `-- name: RetryUserSyncQueueItem :exec +UPDATE auth.user_sync_queue +SET + locked_at = NULL, + lock_owner = NULL, + next_attempt_at = now() + $1::interval, + last_error = $2::text +WHERE id = $3::bigint +` + +type RetryUserSyncQueueItemParams struct { + Backoff pgtype.Interval + LastError string + ID int64 +} + +func (q *Queries) RetryUserSyncQueueItem(ctx context.Context, arg RetryUserSyncQueueItemParams) error { + _, err := q.db.Exec(ctx, retryUserSyncQueueItem, arg.Backoff, arg.LastError, arg.ID) + return err +} diff --git a/packages/db/queries/upsert_public_user.sql.go b/packages/db/queries/upsert_public_user.sql.go new file mode 100644 index 0000000000..dc0fd6eba3 --- /dev/null +++ b/packages/db/queries/upsert_public_user.sql.go @@ -0,0 +1,31 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.29.0 +// source: upsert_public_user.sql + +package queries + +import ( + "context" + + "github.com/google/uuid" +) + +const upsertPublicUser = `-- name: UpsertPublicUser :exec +INSERT INTO public.users (id, email) +VALUES ($1::uuid, $2::text) +ON CONFLICT (id) +DO UPDATE SET + email = EXCLUDED.email, + updated_at = now() +` + +type UpsertPublicUserParams struct { + ID uuid.UUID + Email string +} + +func (q *Queries) UpsertPublicUser(ctx context.Context, arg UpsertPublicUserParams) error { + _, err := q.db.Exec(ctx, upsertPublicUser, arg.ID, arg.Email) + return err +} From 68a626578943ce40a89aba95493c06179b248093 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Sat, 28 Mar 2026 00:25:14 +0000 Subject: [PATCH 02/20] chore: auto-commit generated changes --- packages/dashboard-api/go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/dashboard-api/go.mod b/packages/dashboard-api/go.mod index cd69e6352b..65de5392b9 100644 --- 
a/packages/dashboard-api/go.mod +++ b/packages/dashboard-api/go.mod @@ -23,6 +23,7 @@ require ( github.com/jackc/pgx/v5 v5.7.5 github.com/oapi-codegen/gin-middleware v1.0.2 github.com/oapi-codegen/runtime v1.1.1 + github.com/stretchr/testify v1.11.1 go.uber.org/zap v1.27.1 ) @@ -117,7 +118,6 @@ require ( github.com/shirou/gopsutil/v4 v4.25.9 // indirect github.com/shopspring/decimal v1.4.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect - github.com/stretchr/testify v1.11.1 // indirect github.com/testcontainers/testcontainers-go v0.40.0 // indirect github.com/testcontainers/testcontainers-go/modules/postgres v0.39.0 // indirect github.com/tklauser/go-sysconf v0.3.15 // indirect From 9d8fe6359e239e3681b52bcfd549fcb0369837b5 Mon Sep 17 00:00:00 2001 From: ben-fornefeld Date: Mon, 30 Mar 2026 11:53:13 -0700 Subject: [PATCH 03/20] feat(db): enhance auth user sync triggers to retain direct operations while enqueuing for processing --- ...ashboard_supabase_auth_user_sync_queue.sql | 23 ++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/packages/db/pkg/dashboard/migrations/20260328000000_dashboard_supabase_auth_user_sync_queue.sql b/packages/db/pkg/dashboard/migrations/20260328000000_dashboard_supabase_auth_user_sync_queue.sql index 90ceda9d06..63ac79e1de 100644 --- a/packages/db/pkg/dashboard/migrations/20260328000000_dashboard_supabase_auth_user_sync_queue.sql +++ b/packages/db/pkg/dashboard/migrations/20260328000000_dashboard_supabase_auth_user_sync_queue.sql @@ -24,37 +24,54 @@ CREATE INDEX auth_user_sync_queue_user_idx GRANT INSERT ON auth.user_sync_queue TO trigger_user; GRANT USAGE, SELECT ON SEQUENCE auth.user_sync_queue_id_seq TO trigger_user; --- Replace direct insert-sync with enqueue +-- Keep direct insert-sync and also enqueue CREATE OR REPLACE FUNCTION public.sync_insert_auth_users_to_public_users_trigger() RETURNS TRIGGER LANGUAGE plpgsql AS $func$ BEGIN + INSERT INTO public.users (id, email) + VALUES (NEW.id, 
NEW.email); + INSERT INTO auth.user_sync_queue (user_id, operation) VALUES (NEW.id, 'upsert'); + RETURN NEW; END; $func$ SECURITY DEFINER SET search_path = public; --- Replace direct update-sync with enqueue (only when mirrored fields change) +-- Keep direct update-sync and also enqueue when mirrored fields change CREATE OR REPLACE FUNCTION public.sync_update_auth_users_to_public_users_trigger() RETURNS TRIGGER LANGUAGE plpgsql AS $func$ BEGIN + UPDATE public.users + SET email = NEW.email, + updated_at = now() + WHERE id = NEW.id; + + IF NOT FOUND THEN + RAISE EXCEPTION 'User with id % does not exist in public.users', NEW.id; + END IF; + IF OLD.email IS DISTINCT FROM NEW.email THEN INSERT INTO auth.user_sync_queue (user_id, operation) VALUES (NEW.id, 'upsert'); END IF; + RETURN NEW; END; $func$ SECURITY DEFINER SET search_path = public; --- Replace direct delete-sync with enqueue +-- Keep direct delete-sync and also enqueue CREATE OR REPLACE FUNCTION public.sync_delete_auth_users_to_public_users_trigger() RETURNS TRIGGER LANGUAGE plpgsql AS $func$ BEGIN + DELETE FROM public.users WHERE id = OLD.id; + INSERT INTO auth.user_sync_queue (user_id, operation) VALUES (OLD.id, 'delete'); + RETURN OLD; END; $func$ SECURITY DEFINER SET search_path = public; From 56154df8b9a33e98d39cb134226137825b073532 Mon Sep 17 00:00:00 2001 From: ben-fornefeld Date: Mon, 30 Mar 2026 14:07:19 -0700 Subject: [PATCH 04/20] feat(sync): implement user sync queue with enhanced error handling and recovery mechanisms - Updated the sync runner to use `RunWithRestart` for improved error recovery. - Introduced a new `UserSyncQueue` model to manage user synchronization tasks. - Added SQL migration for creating the `user_sync_queue` table with necessary triggers. - Implemented tests for the processor and supervisor to ensure robust handling of retries and panics. - Refactored existing queries to target the new `public.user_sync_queue` table. 
--- .../supabaseauthusersync/processor.go | 34 +++++- .../supabaseauthusersync/processor_test.go | 109 +++++++++++++++++ .../supabaseauthusersync/runner_test.go | 4 +- .../supabaseauthusersync/supervisor.go | 115 ++++++++++++++++++ .../supabaseauthusersync/supervisor_test.go | 71 +++++++++++ packages/dashboard-api/main.go | 2 +- ...ashboard_supabase_auth_user_sync_queue.sql | 33 +++-- packages/db/pkg/auth/queries/models.go | 13 ++ .../supabase_auth_user_sync/ack.sql | 2 +- .../supabase_auth_user_sync/claim_batch.sql | 4 +- .../supabase_auth_user_sync/dead_letter.sql | 2 +- .../supabase_auth_user_sync/retry.sql | 2 +- packages/db/queries/ack.sql.go | 2 +- packages/db/queries/claim_batch.sql.go | 4 +- packages/db/queries/dead_letter.sql.go | 2 +- packages/db/queries/models.go | 26 ++-- packages/db/queries/retry.sql.go | 2 +- 17 files changed, 387 insertions(+), 40 deletions(-) create mode 100644 packages/dashboard-api/internal/supabaseauthusersync/processor_test.go create mode 100644 packages/dashboard-api/internal/supabaseauthusersync/supervisor.go create mode 100644 packages/dashboard-api/internal/supabaseauthusersync/supervisor_test.go rename packages/db/{pkg/dashboard => }/migrations/20260328000000_dashboard_supabase_auth_user_sync_queue.sql (76%) diff --git a/packages/dashboard-api/internal/supabaseauthusersync/processor.go b/packages/dashboard-api/internal/supabaseauthusersync/processor.go index dfd8ae79a8..28b0816f64 100644 --- a/packages/dashboard-api/internal/supabaseauthusersync/processor.go +++ b/packages/dashboard-api/internal/supabaseauthusersync/processor.go @@ -4,21 +4,32 @@ import ( "context" "errors" "fmt" + "runtime/debug" "time" + "github.com/google/uuid" "github.com/jackc/pgx/v5" "go.uber.org/zap" "github.com/e2b-dev/infra/packages/shared/pkg/logger" ) +type processorStore interface { + Ack(ctx context.Context, id int64) error + Retry(ctx context.Context, id int64, backoff time.Duration, lastError string) error + DeadLetter(ctx context.Context, id 
int64, lastError string) error + GetAuthUser(ctx context.Context, userID uuid.UUID) (*AuthUser, error) + UpsertPublicUser(ctx context.Context, id uuid.UUID, email string) error + DeletePublicUser(ctx context.Context, id uuid.UUID) error +} + type Processor struct { - store *Store + store processorStore maxAttempts int32 l logger.Logger } -func NewProcessor(store *Store, maxAttempts int32, l logger.Logger) *Processor { +func NewProcessor(store processorStore, maxAttempts int32, l logger.Logger) *Processor { return &Processor{ store: store, maxAttempts: maxAttempts, @@ -27,7 +38,7 @@ func NewProcessor(store *Store, maxAttempts int32, l logger.Logger) *Processor { } func (p *Processor) Process(ctx context.Context, item QueueItem) { - err := p.reconcile(ctx, item) + err := p.processOnce(ctx, item) if err == nil { if ackErr := p.store.Ack(ctx, item.ID); ackErr != nil { @@ -69,6 +80,23 @@ func (p *Processor) Process(ctx context.Context, item QueueItem) { } } +func (p *Processor) processOnce(ctx context.Context, item QueueItem) (err error) { + defer func() { + if recovered := recover(); recovered != nil { + p.l.Error(ctx, "panic while processing queue item", + zap.Int64("queue_item_id", item.ID), + zap.String("user_id", item.UserID.String()), + zap.String("panic", fmt.Sprint(recovered)), + zap.String("stack", string(debug.Stack())), + ) + + err = fmt.Errorf("panic while processing queue item: %v", recovered) + } + }() + + return p.reconcile(ctx, item) +} + func (p *Processor) reconcile(ctx context.Context, item QueueItem) error { authUser, err := p.store.GetAuthUser(ctx, item.UserID) diff --git a/packages/dashboard-api/internal/supabaseauthusersync/processor_test.go b/packages/dashboard-api/internal/supabaseauthusersync/processor_test.go new file mode 100644 index 0000000000..23072901fa --- /dev/null +++ b/packages/dashboard-api/internal/supabaseauthusersync/processor_test.go @@ -0,0 +1,109 @@ +package supabaseauthusersync + +import ( + "context" + "testing" + "time" + + 
"github.com/google/uuid" + "github.com/stretchr/testify/require" + + "github.com/e2b-dev/infra/packages/shared/pkg/logger" +) + +type retryCall struct { + id int64 + backoff time.Duration + lastError string +} + +type deadLetterCall struct { + id int64 + lastError string +} + +type fakeProcessorStore struct { + getAuthUserFn func(context.Context, uuid.UUID) (*AuthUser, error) + + ackCalls []int64 + retryCalls []retryCall + deadLetterCalls []deadLetterCall +} + +func (s *fakeProcessorStore) Ack(_ context.Context, id int64) error { + s.ackCalls = append(s.ackCalls, id) + return nil +} + +func (s *fakeProcessorStore) Retry(_ context.Context, id int64, backoff time.Duration, lastError string) error { + s.retryCalls = append(s.retryCalls, retryCall{ + id: id, + backoff: backoff, + lastError: lastError, + }) + return nil +} + +func (s *fakeProcessorStore) DeadLetter(_ context.Context, id int64, lastError string) error { + s.deadLetterCalls = append(s.deadLetterCalls, deadLetterCall{ + id: id, + lastError: lastError, + }) + return nil +} + +func (s *fakeProcessorStore) GetAuthUser(ctx context.Context, userID uuid.UUID) (*AuthUser, error) { + return s.getAuthUserFn(ctx, userID) +} + +func (s *fakeProcessorStore) UpsertPublicUser(_ context.Context, _ uuid.UUID, _ string) error { + return nil +} + +func (s *fakeProcessorStore) DeletePublicUser(_ context.Context, _ uuid.UUID) error { + return nil +} + +func TestProcessorProcessRetriesRecoveredPanic(t *testing.T) { + store := &fakeProcessorStore{ + getAuthUserFn: func(context.Context, uuid.UUID) (*AuthUser, error) { + panic("boom") + }, + } + processor := NewProcessor(store, 3, logger.NewNopLogger()) + item := QueueItem{ + ID: 1, + UserID: uuid.New(), + AttemptCount: 1, + } + + require.NotPanics(t, func() { + processor.Process(context.Background(), item) + }) + require.Empty(t, store.ackCalls) + require.Len(t, store.retryCalls, 1) + require.Contains(t, store.retryCalls[0].lastError, "panic while processing queue item") + 
require.Empty(t, store.deadLetterCalls) +} + +func TestProcessorProcessDeadLettersRecoveredPanicAtMaxAttempts(t *testing.T) { + store := &fakeProcessorStore{ + getAuthUserFn: func(context.Context, uuid.UUID) (*AuthUser, error) { + panic("boom") + }, + } + processor := NewProcessor(store, 3, logger.NewNopLogger()) + item := QueueItem{ + ID: 1, + UserID: uuid.New(), + AttemptCount: 3, + } + + require.NotPanics(t, func() { + processor.Process(context.Background(), item) + }) + require.Empty(t, store.ackCalls) + require.Empty(t, store.retryCalls) + require.Len(t, store.deadLetterCalls, 1) + require.Contains(t, store.deadLetterCalls[0].lastError, "panic while processing queue item") +} diff --git a/packages/dashboard-api/internal/supabaseauthusersync/runner_test.go b/packages/dashboard-api/internal/supabaseauthusersync/runner_test.go index 79fe007842..b578185745 100644 --- a/packages/dashboard-api/internal/supabaseauthusersync/runner_test.go +++ b/packages/dashboard-api/internal/supabaseauthusersync/runner_test.go @@ -115,7 +115,7 @@ func queueDepth(t *testing.T, db *testutils.Database) int { var count int err := db.AuthDb.TestsRawSQLQuery(t.Context(), - "SELECT count(*) FROM auth.user_sync_queue WHERE dead_lettered_at IS NULL", + "SELECT count(*) FROM public.user_sync_queue WHERE dead_lettered_at IS NULL", func(rows pgx.Rows) error { if rows.Next() { return rows.Scan(&count) @@ -242,7 +242,7 @@ func TestDuplicateQueueRowsConverge(t *testing.T) { insertAuthUser(t, db, userID, "dup@example.com") err := db.AuthDb.TestsRawSQL(t.Context(), - "INSERT INTO auth.user_sync_queue (user_id, operation) VALUES ($1, 'upsert')", + "INSERT INTO public.user_sync_queue (user_id, operation) VALUES ($1, 'upsert')", userID) require.NoError(t, err) diff --git a/packages/dashboard-api/internal/supabaseauthusersync/supervisor.go b/packages/dashboard-api/internal/supabaseauthusersync/supervisor.go new file mode 100644 index 0000000000..ce8dafb463 --- /dev/null +++ 
b/packages/dashboard-api/internal/supabaseauthusersync/supervisor.go @@ -0,0 +1,115 @@ +package supabaseauthusersync + +import ( + "context" + "errors" + "fmt" + "runtime/debug" + "time" + + "go.uber.org/zap" + + "github.com/e2b-dev/infra/packages/shared/pkg/logger" +) + +const ( + defaultRestartDelay = time.Second + maxRestartDelay = 30 * time.Second + healthyRunResetThreshold = time.Minute +) + +type supervisorConfig struct { + RestartDelay time.Duration + MaxRestartDelay time.Duration + HealthyRunResetAfter time.Duration +} + +func defaultSupervisorConfig() supervisorConfig { + return supervisorConfig{ + RestartDelay: defaultRestartDelay, + MaxRestartDelay: maxRestartDelay, + HealthyRunResetAfter: healthyRunResetThreshold, + } +} + +func (r *Runner) RunWithRestart(ctx context.Context) error { + return supervise(ctx, r.l, defaultSupervisorConfig(), r.Run) +} + +func supervise(ctx context.Context, l logger.Logger, cfg supervisorConfig, run func(context.Context) error) error { + restartAttempt := 0 + + for { + startedAt := time.Now() + err := runRecovering(ctx, l, run) + runtime := time.Since(startedAt) + + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + return err + } + if ctx.Err() != nil { + return ctx.Err() + } + + if runtime >= cfg.HealthyRunResetAfter { + restartAttempt = 0 + } + restartAttempt++ + + delay := restartBackoff(restartAttempt, cfg.RestartDelay, cfg.MaxRestartDelay) + l.Error(ctx, "supabase auth user sync worker exited unexpectedly; restarting", + zap.Error(err), + zap.Int("restart_attempt", restartAttempt), + zap.Duration("restart_in", delay), + zap.Duration("runtime", runtime), + ) + + timer := time.NewTimer(delay) + select { + case <-ctx.Done(): + timer.Stop() + return ctx.Err() + case <-timer.C: + } + } +} + +func runRecovering(ctx context.Context, l logger.Logger, run func(context.Context) error) (err error) { + defer func() { + if recovered := recover(); recovered != nil { + l.Error(ctx, "supabase auth 
user sync worker panicked", + zap.String("panic", fmt.Sprint(recovered)), + zap.String("stack", string(debug.Stack())), + ) + + err = fmt.Errorf("worker panic: %v", recovered) + } + }() + + err = run(ctx) + if err == nil && ctx.Err() == nil { + return errors.New("worker exited without error") + } + + return err +} + +func restartBackoff(attempt int, base time.Duration, max time.Duration) time.Duration { + if base <= 0 { + base = defaultRestartDelay + } + if max < base { + max = base + } + + delay := base + for i := 1; i < attempt; i++ { + if delay >= max/2 { + return max + } + + delay *= 2 + } + + return delay +} diff --git a/packages/dashboard-api/internal/supabaseauthusersync/supervisor_test.go b/packages/dashboard-api/internal/supabaseauthusersync/supervisor_test.go new file mode 100644 index 0000000000..3e26d71a71 --- /dev/null +++ b/packages/dashboard-api/internal/supabaseauthusersync/supervisor_test.go @@ -0,0 +1,71 @@ +package supabaseauthusersync + +import ( + "context" + "errors" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/e2b-dev/infra/packages/shared/pkg/logger" +) + +func TestSuperviseRestartsAfterUnexpectedError(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var runs atomic.Int32 + errCh := make(chan error, 1) + + go func() { + errCh <- supervise(ctx, logger.NewNopLogger(), supervisorConfig{ + RestartDelay: time.Millisecond, + MaxRestartDelay: time.Millisecond, + HealthyRunResetAfter: time.Hour, + }, func(ctx context.Context) error { + attempt := runs.Add(1) + if attempt < 3 { + return errors.New("boom") + } + + cancel() + <-ctx.Done() + return ctx.Err() + }) + }() + + err := <-errCh + require.ErrorIs(t, err, context.Canceled) + require.Equal(t, int32(3), runs.Load()) +} + +func TestSuperviseRestartsAfterPanic(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + var runs atomic.Int32 + errCh := make(chan error, 1) 
+ + go func() { + errCh <- supervise(ctx, logger.NewNopLogger(), supervisorConfig{ + RestartDelay: time.Millisecond, + MaxRestartDelay: time.Millisecond, + HealthyRunResetAfter: time.Hour, + }, func(ctx context.Context) error { + attempt := runs.Add(1) + if attempt == 1 { + panic("boom") + } + + cancel() + <-ctx.Done() + return ctx.Err() + }) + }() + + err := <-errCh + require.ErrorIs(t, err, context.Canceled) + require.Equal(t, int32(2), runs.Load()) +} diff --git a/packages/dashboard-api/main.go b/packages/dashboard-api/main.go index 0f0bac4ddb..106217091c 100644 --- a/packages/dashboard-api/main.go +++ b/packages/dashboard-api/main.go @@ -247,7 +247,7 @@ func run() int { ) wg.Go(func() { - if err := syncRunner.Run(signalCtx); err != nil && !errors.Is(err, context.Canceled) { + if err := syncRunner.RunWithRestart(signalCtx); err != nil && !errors.Is(err, context.Canceled) { l.Error(ctx, "supabase auth user sync worker error", zap.Error(err)) errorCode.Add(1) } diff --git a/packages/db/pkg/dashboard/migrations/20260328000000_dashboard_supabase_auth_user_sync_queue.sql b/packages/db/migrations/20260328000000_dashboard_supabase_auth_user_sync_queue.sql similarity index 76% rename from packages/db/pkg/dashboard/migrations/20260328000000_dashboard_supabase_auth_user_sync_queue.sql rename to packages/db/migrations/20260328000000_dashboard_supabase_auth_user_sync_queue.sql index 63ac79e1de..a250681905 100644 --- a/packages/db/pkg/dashboard/migrations/20260328000000_dashboard_supabase_auth_user_sync_queue.sql +++ b/packages/db/migrations/20260328000000_dashboard_supabase_auth_user_sync_queue.sql @@ -1,7 +1,7 @@ -- +goose Up -- +goose StatementBegin -CREATE TABLE auth.user_sync_queue ( +CREATE TABLE public.user_sync_queue ( id BIGSERIAL PRIMARY KEY, user_id UUID NOT NULL, operation TEXT NOT NULL CHECK (operation IN ('upsert', 'delete')), @@ -14,15 +14,24 @@ CREATE TABLE auth.user_sync_queue ( dead_lettered_at TIMESTAMPTZ NULL ); +ALTER TABLE public.user_sync_queue ENABLE 
ROW LEVEL SECURITY; + CREATE INDEX auth_user_sync_queue_pending_idx - ON auth.user_sync_queue (id) + ON public.user_sync_queue (id) WHERE dead_lettered_at IS NULL AND locked_at IS NULL; CREATE INDEX auth_user_sync_queue_user_idx - ON auth.user_sync_queue (user_id); + ON public.user_sync_queue (user_id); + +GRANT INSERT ON public.user_sync_queue TO trigger_user; +GRANT USAGE, SELECT ON SEQUENCE public.user_sync_queue_id_seq TO trigger_user; -GRANT INSERT ON auth.user_sync_queue TO trigger_user; -GRANT USAGE, SELECT ON SEQUENCE auth.user_sync_queue_id_seq TO trigger_user; +CREATE POLICY "Allow to create a user sync queue item" + ON public.user_sync_queue + AS PERMISSIVE + FOR INSERT + TO trigger_user + WITH CHECK (TRUE); -- Keep direct insert-sync and also enqueue CREATE OR REPLACE FUNCTION public.sync_insert_auth_users_to_public_users_trigger() RETURNS TRIGGER @@ -32,7 +41,7 @@ BEGIN INSERT INTO public.users (id, email) VALUES (NEW.id, NEW.email); - INSERT INTO auth.user_sync_queue (user_id, operation) + INSERT INTO public.user_sync_queue (user_id, operation) VALUES (NEW.id, 'upsert'); RETURN NEW; @@ -54,7 +63,7 @@ BEGIN END IF; IF OLD.email IS DISTINCT FROM NEW.email THEN - INSERT INTO auth.user_sync_queue (user_id, operation) + INSERT INTO public.user_sync_queue (user_id, operation) VALUES (NEW.id, 'upsert'); END IF; @@ -69,7 +78,7 @@ AS $func$ BEGIN DELETE FROM public.users WHERE id = OLD.id; - INSERT INTO auth.user_sync_queue (user_id, operation) + INSERT INTO public.user_sync_queue (user_id, operation) VALUES (OLD.id, 'delete'); RETURN OLD; @@ -120,9 +129,11 @@ BEGIN END; $func$ SECURITY DEFINER SET search_path = public; -REVOKE INSERT ON auth.user_sync_queue FROM trigger_user; -REVOKE USAGE, SELECT ON SEQUENCE auth.user_sync_queue_id_seq FROM trigger_user; +REVOKE INSERT ON public.user_sync_queue FROM trigger_user; +REVOKE USAGE, SELECT ON SEQUENCE public.user_sync_queue_id_seq FROM trigger_user; + +DROP POLICY IF EXISTS "Allow to create a user sync queue 
item" ON public.user_sync_queue; -DROP TABLE auth.user_sync_queue; +DROP TABLE public.user_sync_queue; -- +goose StatementEnd diff --git a/packages/db/pkg/auth/queries/models.go b/packages/db/pkg/auth/queries/models.go index de91154ef2..e32050f8ad 100644 --- a/packages/db/pkg/auth/queries/models.go +++ b/packages/db/pkg/auth/queries/models.go @@ -220,6 +220,19 @@ type User struct { Email string } +type UserSyncQueue struct { + ID int64 + UserID uuid.UUID + Operation string + CreatedAt time.Time + NextAttemptAt time.Time + LockedAt *time.Time + LockOwner *string + AttemptCount int32 + LastError *string + DeadLetteredAt *time.Time +} + type UsersTeam struct { ID int64 UserID uuid.UUID diff --git a/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/ack.sql b/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/ack.sql index f2fe8ed889..e0d7354dc9 100644 --- a/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/ack.sql +++ b/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/ack.sql @@ -1,3 +1,3 @@ -- name: AckUserSyncQueueItem :exec -DELETE FROM auth.user_sync_queue +DELETE FROM public.user_sync_queue WHERE id = sqlc.arg(id)::bigint; diff --git a/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/claim_batch.sql b/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/claim_batch.sql index af8c29aeaf..08f35e662d 100644 --- a/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/claim_batch.sql +++ b/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/claim_batch.sql @@ -1,12 +1,12 @@ -- name: ClaimUserSyncQueueBatch :many -UPDATE auth.user_sync_queue +UPDATE public.user_sync_queue SET locked_at = now(), lock_owner = sqlc.arg(lock_owner)::text, attempt_count = attempt_count + 1 WHERE id IN ( SELECT id - FROM auth.user_sync_queue + FROM public.user_sync_queue WHERE dead_lettered_at IS NULL AND next_attempt_at <= now() AND (locked_at IS NULL OR locked_at < now() - 
sqlc.arg(lock_timeout)::interval) diff --git a/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/dead_letter.sql b/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/dead_letter.sql index fd4d6a87e9..a68d8cd363 100644 --- a/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/dead_letter.sql +++ b/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/dead_letter.sql @@ -1,5 +1,5 @@ -- name: DeadLetterUserSyncQueueItem :exec -UPDATE auth.user_sync_queue +UPDATE public.user_sync_queue SET locked_at = NULL, lock_owner = NULL, diff --git a/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/retry.sql b/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/retry.sql index cdc21a34d1..5386fdce5e 100644 --- a/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/retry.sql +++ b/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/retry.sql @@ -1,5 +1,5 @@ -- name: RetryUserSyncQueueItem :exec -UPDATE auth.user_sync_queue +UPDATE public.user_sync_queue SET locked_at = NULL, lock_owner = NULL, diff --git a/packages/db/queries/ack.sql.go b/packages/db/queries/ack.sql.go index 66a517f0a5..b55274a6c9 100644 --- a/packages/db/queries/ack.sql.go +++ b/packages/db/queries/ack.sql.go @@ -10,7 +10,7 @@ import ( ) const ackUserSyncQueueItem = `-- name: AckUserSyncQueueItem :exec -DELETE FROM auth.user_sync_queue +DELETE FROM public.user_sync_queue WHERE id = $1::bigint ` diff --git a/packages/db/queries/claim_batch.sql.go b/packages/db/queries/claim_batch.sql.go index f6797712b5..b36cfb0a39 100644 --- a/packages/db/queries/claim_batch.sql.go +++ b/packages/db/queries/claim_batch.sql.go @@ -14,14 +14,14 @@ import ( ) const claimUserSyncQueueBatch = `-- name: ClaimUserSyncQueueBatch :many -UPDATE auth.user_sync_queue +UPDATE public.user_sync_queue SET locked_at = now(), lock_owner = $1::text, attempt_count = attempt_count + 1 WHERE id IN ( SELECT id - FROM auth.user_sync_queue + FROM public.user_sync_queue 
WHERE dead_lettered_at IS NULL AND next_attempt_at <= now() AND (locked_at IS NULL OR locked_at < now() - $2::interval) diff --git a/packages/db/queries/dead_letter.sql.go b/packages/db/queries/dead_letter.sql.go index 3df0f8c511..b9b7941d8a 100644 --- a/packages/db/queries/dead_letter.sql.go +++ b/packages/db/queries/dead_letter.sql.go @@ -10,7 +10,7 @@ import ( ) const deadLetterUserSyncQueueItem = `-- name: DeadLetterUserSyncQueueItem :exec -UPDATE auth.user_sync_queue +UPDATE public.user_sync_queue SET locked_at = NULL, lock_owner = NULL, diff --git a/packages/db/queries/models.go b/packages/db/queries/models.go index 9f0b0cf689..f3ec4451c9 100644 --- a/packages/db/queries/models.go +++ b/packages/db/queries/models.go @@ -54,19 +54,6 @@ type AuthUser struct { Email string } -type AuthUserSyncQueue struct { - ID int64 - UserID uuid.UUID - Operation string - CreatedAt time.Time - NextAttemptAt time.Time - LockedAt *time.Time - LockOwner *string - AttemptCount int32 - LastError *string - DeadLetteredAt *time.Time -} - type BillingSandboxLog struct { SandboxID string EnvID string @@ -239,6 +226,19 @@ type User struct { Email string } +type UserSyncQueue struct { + ID int64 + UserID uuid.UUID + Operation string + CreatedAt time.Time + NextAttemptAt time.Time + LockedAt *time.Time + LockOwner *string + AttemptCount int32 + LastError *string + DeadLetteredAt *time.Time +} + type UsersTeam struct { ID int64 UserID uuid.UUID diff --git a/packages/db/queries/retry.sql.go b/packages/db/queries/retry.sql.go index ccbb7da45b..941c96ae18 100644 --- a/packages/db/queries/retry.sql.go +++ b/packages/db/queries/retry.sql.go @@ -12,7 +12,7 @@ import ( ) const retryUserSyncQueueItem = `-- name: RetryUserSyncQueueItem :exec -UPDATE auth.user_sync_queue +UPDATE public.user_sync_queue SET locked_at = NULL, lock_owner = NULL, From 3a431aa1199f1cea4fe48a87e22b29cc60b14e65 Mon Sep 17 00:00:00 2001 From: ben-fornefeld Date: Mon, 30 Mar 2026 21:08:20 -0700 Subject: [PATCH 05/20] 
feat(sync): add supabase auth user sync configuration and secrets management - Introduced `supabase_auth_user_sync_enabled` variable to control user synchronization. - Updated Nomad job configuration to include the new sync setting. - Added Google Secret Manager resources for managing the sync configuration securely. - Enhanced the dashboard API to utilize the new sync configuration in processing logic. - Refactored related components to improve error handling and logging for the sync process. --- .../job-dashboard-api/jobs/dashboard-api.hcl | 1 + iac/modules/job-dashboard-api/main.tf | 1 + iac/modules/job-dashboard-api/variables.tf | 4 + iac/provider-gcp/api.tf | 34 +++ iac/provider-gcp/main.tf | 52 +++-- iac/provider-gcp/nomad/main.tf | 11 +- iac/provider-gcp/nomad/variables.tf | 8 + packages/dashboard-api/internal/cfg/model.go | 12 +- .../internal/supabaseauthusersync/config.go | 15 +- .../internal/supabaseauthusersync/logging.go | 213 +++++++++++++++++ .../supabaseauthusersync/processor.go | 108 ++++++--- .../internal/supabaseauthusersync/runner.go | 30 ++- .../supabaseauthusersync/supervisor.go | 11 +- packages/dashboard-api/main.go | 10 +- packages/db/Makefile | 7 + .../db/scripts/auth-user-sync-smoke/main.go | 219 ++++++++++++++++++ 16 files changed, 642 insertions(+), 94 deletions(-) create mode 100644 packages/dashboard-api/internal/supabaseauthusersync/logging.go create mode 100644 packages/db/scripts/auth-user-sync-smoke/main.go diff --git a/iac/modules/job-dashboard-api/jobs/dashboard-api.hcl b/iac/modules/job-dashboard-api/jobs/dashboard-api.hcl index f37d681cc9..7bdaca2f24 100644 --- a/iac/modules/job-dashboard-api/jobs/dashboard-api.hcl +++ b/iac/modules/job-dashboard-api/jobs/dashboard-api.hcl @@ -80,6 +80,7 @@ job "dashboard-api" { AUTH_DB_READ_REPLICA_CONNECTION_STRING = "${auth_db_read_replica_connection_string}" CLICKHOUSE_CONNECTION_STRING = "${clickhouse_connection_string}" SUPABASE_JWT_SECRETS = "${supabase_jwt_secrets}" + 
SUPABASE_AUTH_USER_SYNC_ENABLED = "${supabase_auth_user_sync_enabled}" OTEL_COLLECTOR_GRPC_ENDPOINT = "${otel_collector_grpc_endpoint}" LOGS_COLLECTOR_ADDRESS = "${logs_collector_address}" } diff --git a/iac/modules/job-dashboard-api/main.tf b/iac/modules/job-dashboard-api/main.tf index d1c455e3d1..482ae19d9c 100644 --- a/iac/modules/job-dashboard-api/main.tf +++ b/iac/modules/job-dashboard-api/main.tf @@ -15,6 +15,7 @@ resource "nomad_job" "dashboard_api" { auth_db_read_replica_connection_string = var.auth_db_read_replica_connection_string clickhouse_connection_string = var.clickhouse_connection_string supabase_jwt_secrets = var.supabase_jwt_secrets + supabase_auth_user_sync_enabled = var.supabase_auth_user_sync_enabled subdomain = "dashboard-api" diff --git a/iac/modules/job-dashboard-api/variables.tf b/iac/modules/job-dashboard-api/variables.tf index 8b77e1c9a6..69d16a0419 100644 --- a/iac/modules/job-dashboard-api/variables.tf +++ b/iac/modules/job-dashboard-api/variables.tf @@ -44,6 +44,10 @@ variable "supabase_jwt_secrets" { sensitive = true } +variable "supabase_auth_user_sync_enabled" { + type = string +} + variable "otel_collector_grpc_port" { type = number default = 4317 diff --git a/iac/provider-gcp/api.tf b/iac/provider-gcp/api.tf index 26f1a1b408..736b322090 100644 --- a/iac/provider-gcp/api.tf +++ b/iac/provider-gcp/api.tf @@ -27,6 +27,40 @@ resource "google_secret_manager_secret_version" "postgres_read_replica_connectio } } +resource "google_secret_manager_secret" "auth_db_connection_string" { + secret_id = "${var.prefix}auth-db-connection-string" + + replication { + auto {} + } +} + +resource "google_secret_manager_secret_version" "auth_db_connection_string" { + secret = google_secret_manager_secret.auth_db_connection_string.name + secret_data = " " + + lifecycle { + ignore_changes = [secret_data] + } +} + +resource "google_secret_manager_secret" "dashboard_api_supabase_auth_user_sync_enabled" { + secret_id = 
"${var.prefix}dashboard-api-supabase-auth-user-sync-enabled" + + replication { + auto {} + } +} + +resource "google_secret_manager_secret_version" "dashboard_api_supabase_auth_user_sync_enabled" { + secret = google_secret_manager_secret.dashboard_api_supabase_auth_user_sync_enabled.name + secret_data = "false" + + lifecycle { + ignore_changes = [secret_data] + } +} + resource "random_password" "api_secret" { length = 32 special = false diff --git a/iac/provider-gcp/main.tf b/iac/provider-gcp/main.tf index 8fe538e1fa..32da66604f 100644 --- a/iac/provider-gcp/main.tf +++ b/iac/provider-gcp/main.tf @@ -213,31 +213,33 @@ module "nomad" { additional_traefik_arguments = var.additional_traefik_arguments # API - api_server_count = var.api_server_count - api_resources_cpu_count = var.api_resources_cpu_count - api_resources_memory_mb = var.api_resources_memory_mb - api_machine_count = var.api_cluster_size - api_node_pool = var.api_node_pool - api_port = var.api_port - environment = var.environment - google_service_account_key = module.init.google_service_account_key - api_secret = random_password.api_secret.result - custom_envs_repository_name = google_artifact_registry_repository.custom_environments_repository.name - postgres_connection_string_secret_name = module.init.postgres_connection_string_secret_name - postgres_read_replica_connection_string_secret_version = google_secret_manager_secret_version.postgres_read_replica_connection_string - supabase_jwt_secrets_secret_name = module.init.supabase_jwt_secret_name - posthog_api_key_secret_name = module.init.posthog_api_key_secret_name - analytics_collector_host_secret_name = module.init.analytics_collector_host_secret_name - analytics_collector_api_token_secret_name = module.init.analytics_collector_api_token_secret_name - api_admin_token = random_password.api_admin_secret.result - redis_cluster_url_secret_version = module.init.redis_cluster_url_secret_version - redis_tls_ca_base64_secret_version = 
module.init.redis_tls_ca_base64_secret_version - sandbox_access_token_hash_seed = random_password.sandbox_access_token_hash_seed.result - sandbox_storage_backend = var.sandbox_storage_backend - db_max_open_connections = var.db_max_open_connections - db_min_idle_connections = var.db_min_idle_connections - auth_db_max_open_connections = var.auth_db_max_open_connections - auth_db_min_idle_connections = var.auth_db_min_idle_connections + api_server_count = var.api_server_count + api_resources_cpu_count = var.api_resources_cpu_count + api_resources_memory_mb = var.api_resources_memory_mb + api_machine_count = var.api_cluster_size + api_node_pool = var.api_node_pool + api_port = var.api_port + environment = var.environment + google_service_account_key = module.init.google_service_account_key + api_secret = random_password.api_secret.result + custom_envs_repository_name = google_artifact_registry_repository.custom_environments_repository.name + postgres_connection_string_secret_name = module.init.postgres_connection_string_secret_name + auth_db_connection_string_secret_version = google_secret_manager_secret_version.auth_db_connection_string + postgres_read_replica_connection_string_secret_version = google_secret_manager_secret_version.postgres_read_replica_connection_string + supabase_jwt_secrets_secret_name = module.init.supabase_jwt_secret_name + dashboard_api_supabase_auth_user_sync_enabled_secret_version = google_secret_manager_secret_version.dashboard_api_supabase_auth_user_sync_enabled + posthog_api_key_secret_name = module.init.posthog_api_key_secret_name + analytics_collector_host_secret_name = module.init.analytics_collector_host_secret_name + analytics_collector_api_token_secret_name = module.init.analytics_collector_api_token_secret_name + api_admin_token = random_password.api_admin_secret.result + redis_cluster_url_secret_version = module.init.redis_cluster_url_secret_version + redis_tls_ca_base64_secret_version = module.init.redis_tls_ca_base64_secret_version 
+ sandbox_access_token_hash_seed = random_password.sandbox_access_token_hash_seed.result + sandbox_storage_backend = var.sandbox_storage_backend + db_max_open_connections = var.db_max_open_connections + db_min_idle_connections = var.db_min_idle_connections + auth_db_max_open_connections = var.auth_db_max_open_connections + auth_db_min_idle_connections = var.auth_db_min_idle_connections # Click Proxy client_proxy_count = var.client_proxy_count diff --git a/iac/provider-gcp/nomad/main.tf b/iac/provider-gcp/nomad/main.tf index f7889e2e35..4a9aae6220 100644 --- a/iac/provider-gcp/nomad/main.tf +++ b/iac/provider-gcp/nomad/main.tf @@ -10,6 +10,10 @@ data "google_secret_manager_secret_version" "postgres_connection_string" { secret = var.postgres_connection_string_secret_name } +data "google_secret_manager_secret_version" "auth_db_connection_string" { + secret = var.auth_db_connection_string_secret_version.secret +} + data "google_secret_manager_secret_version" "postgres_read_replica_connection_string" { secret = var.postgres_read_replica_connection_string_secret_version.secret } @@ -18,6 +22,10 @@ data "google_secret_manager_secret_version" "supabase_jwt_secrets" { secret = var.supabase_jwt_secrets_secret_name } +data "google_secret_manager_secret_version" "dashboard_api_supabase_auth_user_sync_enabled" { + secret = var.dashboard_api_supabase_auth_user_sync_enabled_secret_version.secret +} + data "google_secret_manager_secret_version" "posthog_api_key" { secret = var.posthog_api_key_secret_name } @@ -131,10 +139,11 @@ module "dashboard_api" { image = data.google_artifact_registry_docker_image.dashboard_api_image[0].self_link postgres_connection_string = data.google_secret_manager_secret_version.postgres_connection_string.secret_data - auth_db_connection_string = data.google_secret_manager_secret_version.postgres_connection_string.secret_data + auth_db_connection_string = trimspace(data.google_secret_manager_secret_version.auth_db_connection_string.secret_data) 
auth_db_read_replica_connection_string = trimspace(data.google_secret_manager_secret_version.postgres_read_replica_connection_string.secret_data) clickhouse_connection_string = local.clickhouse_connection_string supabase_jwt_secrets = trimspace(data.google_secret_manager_secret_version.supabase_jwt_secrets.secret_data) + supabase_auth_user_sync_enabled = trimspace(data.google_secret_manager_secret_version.dashboard_api_supabase_auth_user_sync_enabled.secret_data) otel_collector_grpc_port = var.otel_collector_grpc_port logs_proxy_port = var.logs_proxy_port diff --git a/iac/provider-gcp/nomad/variables.tf b/iac/provider-gcp/nomad/variables.tf index 54ead44500..2bbbb9494f 100644 --- a/iac/provider-gcp/nomad/variables.tf +++ b/iac/provider-gcp/nomad/variables.tf @@ -175,6 +175,10 @@ variable "postgres_connection_string_secret_name" { type = string } +variable "auth_db_connection_string_secret_version" { + type = any +} + variable "postgres_read_replica_connection_string_secret_version" { type = any } @@ -183,6 +187,10 @@ variable "supabase_jwt_secrets_secret_name" { type = string } +variable "dashboard_api_supabase_auth_user_sync_enabled_secret_version" { + type = any +} + variable "client_proxy_count" { type = number } diff --git a/packages/dashboard-api/internal/cfg/model.go b/packages/dashboard-api/internal/cfg/model.go index aafed3776c..bbd83194f6 100644 --- a/packages/dashboard-api/internal/cfg/model.go +++ b/packages/dashboard-api/internal/cfg/model.go @@ -1,10 +1,6 @@ package cfg -import ( - "time" - - "github.com/caarlos0/env/v11" -) +import "github.com/caarlos0/env/v11" type Config struct { Port int `env:"PORT" envDefault:"3010"` @@ -15,11 +11,7 @@ type Config struct { AuthDBConnectionString string `env:"AUTH_DB_CONNECTION_STRING"` AuthDBReadReplicaConnectionString string `env:"AUTH_DB_READ_REPLICA_CONNECTION_STRING"` - SupabaseAuthUserSyncEnabled bool `env:"SUPABASE_AUTH_USER_SYNC_ENABLED" envDefault:"false"` - SupabaseAuthUserSyncBatchSize int32 
`env:"SUPABASE_AUTH_USER_SYNC_BATCH_SIZE" envDefault:"50"` - SupabaseAuthUserSyncPollInterval time.Duration `env:"SUPABASE_AUTH_USER_SYNC_POLL_INTERVAL" envDefault:"2s"` - SupabaseAuthUserSyncLockTimeout time.Duration `env:"SUPABASE_AUTH_USER_SYNC_LOCK_TIMEOUT" envDefault:"2m"` - SupabaseAuthUserSyncMaxAttempts int32 `env:"SUPABASE_AUTH_USER_SYNC_MAX_ATTEMPTS" envDefault:"20"` + SupabaseAuthUserSyncEnabled bool `env:"SUPABASE_AUTH_USER_SYNC_ENABLED" envDefault:"false"` } func Parse() (Config, error) { diff --git a/packages/dashboard-api/internal/supabaseauthusersync/config.go b/packages/dashboard-api/internal/supabaseauthusersync/config.go index 6883064a9e..778be6d592 100644 --- a/packages/dashboard-api/internal/supabaseauthusersync/config.go +++ b/packages/dashboard-api/internal/supabaseauthusersync/config.go @@ -2,6 +2,13 @@ package supabaseauthusersync import "time" +const ( + defaultBatchSize int32 = 50 + defaultPollInterval time.Duration = 2 * time.Second + defaultLockTimeout time.Duration = 2 * time.Minute + defaultMaxAttempts int32 = 20 +) + type Config struct { Enabled bool BatchSize int32 @@ -13,9 +20,9 @@ type Config struct { func DefaultConfig() Config { return Config{ Enabled: false, - BatchSize: 50, - PollInterval: 2 * time.Second, - LockTimeout: 2 * time.Minute, - MaxAttempts: 20, + BatchSize: defaultBatchSize, + PollInterval: defaultPollInterval, + LockTimeout: defaultLockTimeout, + MaxAttempts: defaultMaxAttempts, } } diff --git a/packages/dashboard-api/internal/supabaseauthusersync/logging.go b/packages/dashboard-api/internal/supabaseauthusersync/logging.go new file mode 100644 index 0000000000..c60d02a0c5 --- /dev/null +++ b/packages/dashboard-api/internal/supabaseauthusersync/logging.go @@ -0,0 +1,213 @@ +package supabaseauthusersync + +import ( + "sort" + "time" + + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + + "github.com/e2b-dev/infra/packages/shared/pkg/logger" +) + +type processOutcome string + +const ( + processOutcomeAcked 
processOutcome = "acked" + processOutcomeAckFailed processOutcome = "ack_failed" + processOutcomeRetried processOutcome = "retried" + processOutcomeRetryFailed processOutcome = "retry_failed" + processOutcomeDeadLettered processOutcome = "dead_lettered" + processOutcomeDeadLetterFailed processOutcome = "dead_letter_failed" +) + +type reconcileAction string + +const ( + reconcileActionUpsertPublicUser reconcileAction = "upsert_public_user" + reconcileActionDeletePublicUser reconcileAction = "delete_public_user" +) + +type processResult struct { + Outcome processOutcome + Action reconcileAction + Duration time.Duration + Backoff time.Duration +} + +type batchSummary struct { + ClaimedCount int + AckedCount int + AckFailedCount int + RetriedCount int + RetryFailedCount int + DeadLetteredCount int + DeadLetterFailedCount int + MaxAttemptCount int32 + OldestCreatedAt time.Time + NewestCreatedAt time.Time + OldestItemAge time.Duration + NewestItemAge time.Duration + OperationCounts map[string]int + ActionCounts map[string]int +} + +func newBatchSummary(items []QueueItem, now time.Time) batchSummary { + summary := batchSummary{ + ClaimedCount: len(items), + OperationCounts: make(map[string]int), + ActionCounts: make(map[string]int), + } + + for i, item := range items { + if i == 0 || item.AttemptCount > summary.MaxAttemptCount { + summary.MaxAttemptCount = item.AttemptCount + } + + summary.OperationCounts[item.Operation]++ + + if item.CreatedAt.IsZero() { + continue + } + + if summary.OldestCreatedAt.IsZero() || item.CreatedAt.Before(summary.OldestCreatedAt) { + summary.OldestCreatedAt = item.CreatedAt + } + if summary.NewestCreatedAt.IsZero() || item.CreatedAt.After(summary.NewestCreatedAt) { + summary.NewestCreatedAt = item.CreatedAt + } + } + + if !summary.OldestCreatedAt.IsZero() { + summary.OldestItemAge = ageSince(summary.OldestCreatedAt, now) + summary.NewestItemAge = ageSince(summary.NewestCreatedAt, now) + } + + return summary +} + +func (s *batchSummary) 
Add(result processResult) { + switch result.Outcome { + case processOutcomeAcked: + s.AckedCount++ + case processOutcomeAckFailed: + s.AckFailedCount++ + case processOutcomeRetried: + s.RetriedCount++ + case processOutcomeRetryFailed: + s.RetryFailedCount++ + case processOutcomeDeadLettered: + s.DeadLetteredCount++ + case processOutcomeDeadLetterFailed: + s.DeadLetterFailedCount++ + } + + if result.Action != "" { + s.ActionCounts[string(result.Action)]++ + } +} + +func (s batchSummary) Fields(totalDuration time.Duration) []zap.Field { + fields := []zap.Field{ + zap.Int("queue_batch.claimed_count", s.ClaimedCount), + zap.Int("queue_batch.acked_count", s.AckedCount), + zap.Int("queue_batch.ack_failed_count", s.AckFailedCount), + zap.Int("queue_batch.retried_count", s.RetriedCount), + zap.Int("queue_batch.retry_failed_count", s.RetryFailedCount), + zap.Int("queue_batch.dead_lettered_count", s.DeadLetteredCount), + zap.Int("queue_batch.dead_letter_failed_count", s.DeadLetterFailedCount), + zap.Int32("queue_batch.max_attempt", s.MaxAttemptCount), + zap.Duration("queue_batch.duration", totalDuration), + } + + if !s.OldestCreatedAt.IsZero() { + fields = append(fields, + logger.Time("queue_batch.oldest_item_created_at", s.OldestCreatedAt), + logger.Time("queue_batch.newest_item_created_at", s.NewestCreatedAt), + zap.Duration("queue_batch.oldest_item_age", s.OldestItemAge), + zap.Duration("queue_batch.newest_item_age", s.NewestItemAge), + ) + } + + if len(s.OperationCounts) > 0 { + fields = append(fields, zap.Object("queue_batch.operation_counts", countsField(s.OperationCounts))) + } + if len(s.ActionCounts) > 0 { + fields = append(fields, zap.Object("queue_batch.action_counts", countsField(s.ActionCounts))) + } + + return fields +} + +func (s batchSummary) Level() zapcore.Level { + if s.AckFailedCount > 0 || s.RetryFailedCount > 0 || s.DeadLetteredCount > 0 || s.DeadLetterFailedCount > 0 { + return zap.ErrorLevel + } + if s.RetriedCount > 0 { + return zap.WarnLevel + } + + 
return zap.InfoLevel +} + +func processResultFields(item QueueItem, result processResult, now time.Time) []zap.Field { + fields := queueItemFields(item, now) + fields = append(fields, + zap.String("queue_item.outcome", string(result.Outcome)), + zap.Duration("queue_item.duration", result.Duration), + ) + + if result.Action != "" { + fields = append(fields, zap.String("queue_item.action", string(result.Action))) + } + if result.Backoff > 0 { + fields = append(fields, + zap.Duration("queue_item.retry_backoff", result.Backoff), + zap.Int32("queue_item.next_attempt", item.AttemptCount+1), + ) + } + + return fields +} + +func queueItemFields(item QueueItem, now time.Time) []zap.Field { + fields := []zap.Field{ + zap.Int64("queue_item.id", item.ID), + logger.WithUserID(item.UserID.String()), + zap.String("queue_item.operation", item.Operation), + zap.Int32("queue_item.attempt", item.AttemptCount), + } + + if !item.CreatedAt.IsZero() { + fields = append(fields, + logger.Time("queue_item.created_at", item.CreatedAt), + zap.Duration("queue_item.age", ageSince(item.CreatedAt, now)), + ) + } + + return fields +} + +func ageSince(createdAt time.Time, now time.Time) time.Duration { + if createdAt.IsZero() || now.Before(createdAt) { + return 0 + } + + return now.Sub(createdAt) +} + +type countsField map[string]int + +func (f countsField) MarshalLogObject(enc zapcore.ObjectEncoder) error { + keys := make([]string, 0, len(f)) + for key := range f { + keys = append(keys, key) + } + sort.Strings(keys) + + for _, key := range keys { + enc.AddInt(key, f[key]) + } + + return nil +} diff --git a/packages/dashboard-api/internal/supabaseauthusersync/processor.go b/packages/dashboard-api/internal/supabaseauthusersync/processor.go index 28b0816f64..1d562c8288 100644 --- a/packages/dashboard-api/internal/supabaseauthusersync/processor.go +++ b/packages/dashboard-api/internal/supabaseauthusersync/processor.go @@ -37,57 +37,99 @@ func NewProcessor(store processorStore, maxAttempts int32, l 
logger.Logger) *Pro } } -func (p *Processor) Process(ctx context.Context, item QueueItem) { - err := p.processOnce(ctx, item) +func (p *Processor) Process(ctx context.Context, item QueueItem) processResult { + startedAt := time.Now() + action, err := p.processOnce(ctx, item) + result := processResult{ + Action: action, + Duration: time.Since(startedAt), + } if err == nil { if ackErr := p.store.Ack(ctx, item.ID); ackErr != nil { - p.l.Error(ctx, "failed to ack queue item", - zap.Int64("queue_item_id", item.ID), - zap.String("user_id", item.UserID.String()), - zap.Error(ackErr), + result.Outcome = processOutcomeAckFailed + + p.l.Error(ctx, "processed supabase auth sync queue item but failed to ack", + append( + processResultFields(item, result, time.Now()), + zap.NamedError("ack_error", ackErr), + )..., ) + + return result } - return - } + result.Outcome = processOutcomeAcked + p.l.Info(ctx, "processed supabase auth sync queue item", processResultFields(item, result, time.Now())...) - p.l.Warn(ctx, "failed to process queue item", - zap.Int64("queue_item_id", item.ID), - zap.String("user_id", item.UserID.String()), - zap.Int32("attempt", item.AttemptCount), - zap.Error(err), - ) + return result + } if item.AttemptCount >= p.maxAttempts { if dlErr := p.store.DeadLetter(ctx, item.ID, err.Error()); dlErr != nil { - p.l.Error(ctx, "failed to dead-letter queue item", - zap.Int64("queue_item_id", item.ID), - zap.Error(dlErr), + result.Outcome = processOutcomeDeadLetterFailed + + p.l.Error(ctx, "failed to dead-letter supabase auth sync queue item", + append( + processResultFields(item, result, time.Now()), + zap.Int32("queue_item.max_attempts", p.maxAttempts), + zap.NamedError("processing_error", err), + zap.NamedError("dead_letter_error", dlErr), + )..., ) + + return result } - return + result.Outcome = processOutcomeDeadLettered + p.l.Error(ctx, "dead-lettered supabase auth sync queue item after max attempts", + append( + processResultFields(item, result, time.Now()), + 
zap.Int32("queue_item.max_attempts", p.maxAttempts), + zap.NamedError("processing_error", err), + )..., + ) + + return result } backoff := retryBackoff(item.AttemptCount) + result.Outcome = processOutcomeRetried + result.Backoff = backoff if retryErr := p.store.Retry(ctx, item.ID, backoff, err.Error()); retryErr != nil { - p.l.Error(ctx, "failed to retry queue item", - zap.Int64("queue_item_id", item.ID), - zap.Error(retryErr), + result.Outcome = processOutcomeRetryFailed + + p.l.Error(ctx, "failed to schedule supabase auth sync queue item retry", + append( + processResultFields(item, result, time.Now()), + zap.NamedError("processing_error", err), + zap.NamedError("retry_error", retryErr), + )..., ) + + return result } + + p.l.Warn(ctx, "retrying supabase auth sync queue item after processing error", + append( + processResultFields(item, result, time.Now()), + zap.NamedError("processing_error", err), + )..., + ) + + return result } -func (p *Processor) processOnce(ctx context.Context, item QueueItem) (err error) { +func (p *Processor) processOnce(ctx context.Context, item QueueItem) (action reconcileAction, err error) { defer func() { if recovered := recover(); recovered != nil { - p.l.Error(ctx, "panic while processing queue item", - zap.Int64("queue_item_id", item.ID), - zap.String("user_id", item.UserID.String()), - zap.String("panic", fmt.Sprint(recovered)), - zap.String("stack", string(debug.Stack())), + p.l.Error(ctx, "panic while processing supabase auth sync queue item", + append( + queueItemFields(item, time.Now()), + zap.String("worker.panic", fmt.Sprint(recovered)), + zap.String("worker.stack", string(debug.Stack())), + )..., ) err = fmt.Errorf("panic while processing queue item: %v", recovered) @@ -97,26 +139,26 @@ func (p *Processor) processOnce(ctx context.Context, item QueueItem) (err error) return p.reconcile(ctx, item) } -func (p *Processor) reconcile(ctx context.Context, item QueueItem) error { +func (p *Processor) reconcile(ctx context.Context, 
item QueueItem) (reconcileAction, error) { authUser, err := p.store.GetAuthUser(ctx, item.UserID) if errors.Is(err, pgx.ErrNoRows) { if delErr := p.store.DeletePublicUser(ctx, item.UserID); delErr != nil { - return fmt.Errorf("delete public.users %s: %w", item.UserID, delErr) + return "", fmt.Errorf("delete public.users %s: %w", item.UserID, delErr) } - return nil + return reconcileActionDeletePublicUser, nil } if err != nil { - return fmt.Errorf("get auth.users %s: %w", item.UserID, err) + return "", fmt.Errorf("get auth.users %s: %w", item.UserID, err) } if err = p.store.UpsertPublicUser(ctx, authUser.ID, authUser.Email); err != nil { - return fmt.Errorf("upsert public.users %s: %w", authUser.ID, err) + return "", fmt.Errorf("upsert public.users %s: %w", authUser.ID, err) } - return nil + return reconcileActionUpsertPublicUser, nil } func retryBackoff(attempt int32) time.Duration { diff --git a/packages/dashboard-api/internal/supabaseauthusersync/runner.go b/packages/dashboard-api/internal/supabaseauthusersync/runner.go index 39de50196c..1acd35d480 100644 --- a/packages/dashboard-api/internal/supabaseauthusersync/runner.go +++ b/packages/dashboard-api/internal/supabaseauthusersync/runner.go @@ -18,20 +18,24 @@ type Runner struct { } func NewRunner(cfg Config, store *Store, lockOwner string, l logger.Logger) *Runner { + workerLogger := l.With(logger.WithServiceInstanceID(lockOwner)) + return &Runner{ cfg: cfg, store: store, - processor: NewProcessor(store, cfg.MaxAttempts, l), + processor: NewProcessor(store, cfg.MaxAttempts, workerLogger), lockOwner: lockOwner, - l: l, + l: workerLogger, } } func (r *Runner) Run(ctx context.Context) error { r.l.Info(ctx, "starting supabase auth user sync worker", - zap.String("lock_owner", r.lockOwner), - zap.Duration("poll_interval", r.cfg.PollInterval), - zap.Int32("batch_size", r.cfg.BatchSize), + zap.String("worker.lock_owner", r.lockOwner), + zap.Duration("worker.poll_interval", r.cfg.PollInterval), + 
zap.Int32("worker.batch_size", r.cfg.BatchSize), + zap.Duration("worker.lock_timeout", r.cfg.LockTimeout), + zap.Int32("worker.max_attempts", r.cfg.MaxAttempts), ) ticker := time.NewTicker(r.cfg.PollInterval) @@ -40,7 +44,7 @@ func (r *Runner) Run(ctx context.Context) error { for { select { case <-ctx.Done(): - r.l.Info(ctx, "stopping supabase auth user sync worker") + r.l.Info(ctx, "stopping supabase auth user sync worker", zap.Error(ctx.Err())) return ctx.Err() case <-ticker.C: @@ -50,9 +54,15 @@ func (r *Runner) Run(ctx context.Context) error { } func (r *Runner) poll(ctx context.Context) { + claimedAt := time.Now() items, err := r.store.ClaimBatch(ctx, r.lockOwner, r.cfg.LockTimeout, r.cfg.BatchSize) if err != nil { - r.l.Error(ctx, "failed to claim queue batch", zap.Error(err)) + r.l.Error(ctx, "failed to claim supabase auth sync queue batch", + zap.String("worker.lock_owner", r.lockOwner), + zap.Duration("worker.lock_timeout", r.cfg.LockTimeout), + zap.Int32("worker.batch_size", r.cfg.BatchSize), + zap.Error(err), + ) return } @@ -61,9 +71,11 @@ func (r *Runner) poll(ctx context.Context) { return } - r.l.Debug(ctx, "claimed queue batch", zap.Int("count", len(items))) + summary := newBatchSummary(items, claimedAt) for _, item := range items { - r.processor.Process(ctx, item) + summary.Add(r.processor.Process(ctx, item)) } + + r.l.Log(ctx, summary.Level(), "processed supabase auth sync queue batch", summary.Fields(time.Since(claimedAt))...) 
} diff --git a/packages/dashboard-api/internal/supabaseauthusersync/supervisor.go b/packages/dashboard-api/internal/supabaseauthusersync/supervisor.go index ce8dafb463..24269f351e 100644 --- a/packages/dashboard-api/internal/supabaseauthusersync/supervisor.go +++ b/packages/dashboard-api/internal/supabaseauthusersync/supervisor.go @@ -59,9 +59,10 @@ func supervise(ctx context.Context, l logger.Logger, cfg supervisorConfig, run f delay := restartBackoff(restartAttempt, cfg.RestartDelay, cfg.MaxRestartDelay) l.Error(ctx, "supabase auth user sync worker exited unexpectedly; restarting", zap.Error(err), - zap.Int("restart_attempt", restartAttempt), - zap.Duration("restart_in", delay), - zap.Duration("runtime", runtime), + zap.Int("worker.restart_attempt", restartAttempt), + zap.Duration("worker.restart_in", delay), + zap.Duration("worker.runtime", runtime), + zap.Duration("worker.healthy_run_reset_after", cfg.HealthyRunResetAfter), ) timer := time.NewTimer(delay) @@ -78,8 +79,8 @@ func runRecovering(ctx context.Context, l logger.Logger, run func(context.Contex defer func() { if recovered := recover(); recovered != nil { l.Error(ctx, "supabase auth user sync worker panicked", - zap.String("panic", fmt.Sprint(recovered)), - zap.String("stack", string(debug.Stack())), + zap.String("worker.panic", fmt.Sprint(recovered)), + zap.String("worker.stack", string(debug.Stack())), ) err = fmt.Errorf("worker panic: %v", recovered) diff --git a/packages/dashboard-api/main.go b/packages/dashboard-api/main.go index 106217091c..22bca86bf3 100644 --- a/packages/dashboard-api/main.go +++ b/packages/dashboard-api/main.go @@ -233,14 +233,10 @@ func run() int { if config.SupabaseAuthUserSyncEnabled { workerLogger := l.With(zap.String("worker", "supabase_auth_user_sync")) syncStore := supabaseauthusersync.NewStore(db.Queries) + syncConfig := supabaseauthusersync.DefaultConfig() + syncConfig.Enabled = true syncRunner := supabaseauthusersync.NewRunner( - supabaseauthusersync.Config{ - Enabled: 
true, - BatchSize: config.SupabaseAuthUserSyncBatchSize, - PollInterval: config.SupabaseAuthUserSyncPollInterval, - LockTimeout: config.SupabaseAuthUserSyncLockTimeout, - MaxAttempts: config.SupabaseAuthUserSyncMaxAttempts, - }, + syncConfig, syncStore, serviceInstanceID, workerLogger, diff --git a/packages/db/Makefile b/packages/db/Makefile index e5f1d67439..acd52c6d0e 100644 --- a/packages/db/Makefile +++ b/packages/db/Makefile @@ -5,6 +5,9 @@ PREFIX := $(strip $(subst ",,$(PREFIX))) goose := GOOSE_DRIVER=postgres GOOSE_DBSTRING=$(POSTGRES_CONNECTION_STRING) go tool goose -table "_migrations" -dir "migrations" goose-local := GOOSE_DBSTRING=postgres://postgres:postgres@localhost:5432/postgres?sslmode=disable go tool goose -table "_migrations" -dir "migrations" postgres +AUTH_USER_SYNC_SMOKE_COUNT ?= 15 +AUTH_USER_SYNC_SMOKE_WAIT ?= 10s + .PHONY: migrate migrate: @@ -61,3 +64,7 @@ test: build-tools seed-db: @echo "Seeding database..." @POSTGRES_CONNECTION_STRING=$(POSTGRES_CONNECTION_STRING) go run ./scripts/seed/postgres/seed-db.go + +.PHONY: auth-user-sync-smoke +auth-user-sync-smoke: + @AUTH_DB_CONNECTION_STRING="$(AUTH_DB_CONNECTION_STRING)" POSTGRES_CONNECTION_STRING="$(POSTGRES_CONNECTION_STRING)" AUTH_USER_SYNC_SMOKE_COUNT="$(AUTH_USER_SYNC_SMOKE_COUNT)" AUTH_USER_SYNC_SMOKE_WAIT="$(AUTH_USER_SYNC_SMOKE_WAIT)" go run ./scripts/auth-user-sync-smoke diff --git a/packages/db/scripts/auth-user-sync-smoke/main.go b/packages/db/scripts/auth-user-sync-smoke/main.go new file mode 100644 index 0000000000..6ba5cb807a --- /dev/null +++ b/packages/db/scripts/auth-user-sync-smoke/main.go @@ -0,0 +1,219 @@ +package main + +import ( + "context" + "fmt" + "os" + "os/signal" + "strconv" + "strings" + "syscall" + "time" + + "github.com/google/uuid" + + authdb "github.com/e2b-dev/infra/packages/db/pkg/auth" +) + +const ( + defaultCount = 1 + defaultWait = 30 * time.Second +) + +type config struct { + ConnectionString string + Count int + Wait time.Duration +} + +type authUser 
struct { + ID uuid.UUID + Email string +} + +func main() { + ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM) + err := run(ctx) + stop() + + if err != nil { + fmt.Fprintf(os.Stderr, "auth user sync smoke failed: %v\n", err) + os.Exit(1) + } +} + +func run(ctx context.Context) error { + cfg, err := loadConfig() + if err != nil { + return err + } + + db, err := authdb.NewClient(ctx, cfg.ConnectionString, "") + if err != nil { + return fmt.Errorf("create auth db client: %w", err) + } + defer func() { + if closeErr := db.Close(); closeErr != nil { + fmt.Fprintf(os.Stderr, "close auth db client: %v\n", closeErr) + } + }() + + users := newAuthUsers(cfg.Count) + insertedUsers := make([]authUser, 0, len(users)) + + defer func() { + if len(insertedUsers) == 0 { + return + } + + if cleanupErr := cleanupInsertedUsers(ctx, db, insertedUsers); cleanupErr != nil { + fmt.Fprintf(os.Stderr, "cleanup auth users: %v\n", cleanupErr) + + return + } + + fmt.Fprintf(os.Stdout, "cleaned up %d auth.users rows\n", len(insertedUsers)) + }() + + if err := insertUsers(ctx, db, users, &insertedUsers); err != nil { + return err + } + + fmt.Fprintf(os.Stdout, "created %d auth.users rows\n", len(insertedUsers)) + for _, user := range insertedUsers { + fmt.Fprintf(os.Stdout, " %s %s\n", user.ID.String(), user.Email) + } + + fmt.Fprintf(os.Stdout, "waiting %s before delete\n", cfg.Wait) + + timer := time.NewTimer(cfg.Wait) + defer timer.Stop() + + select { + case <-ctx.Done(): + return ctx.Err() + case <-timer.C: + } + + if err := cleanupInsertedUsers(ctx, db, insertedUsers); err != nil { + return fmt.Errorf("delete auth users: %w", err) + } + + insertedUsers = nil + fmt.Fprintln(os.Stdout, "deleted auth.users rows") + + return nil +} + +func loadConfig() (config, error) { + connectionString := strings.TrimSpace(os.Getenv("AUTH_DB_CONNECTION_STRING")) + if connectionString == "" { + connectionString = strings.TrimSpace(os.Getenv("POSTGRES_CONNECTION_STRING")) + 
} + if connectionString == "" { + return config{}, fmt.Errorf("AUTH_DB_CONNECTION_STRING or POSTGRES_CONNECTION_STRING must be set") + } + + count, err := loadCount() + if err != nil { + return config{}, err + } + + wait, err := loadWait() + if err != nil { + return config{}, err + } + + return config{ + ConnectionString: connectionString, + Count: count, + Wait: wait, + }, nil +} + +func loadCount() (int, error) { + rawCount := strings.TrimSpace(os.Getenv("AUTH_USER_SYNC_SMOKE_COUNT")) + if rawCount == "" { + return defaultCount, nil + } + + count, err := strconv.Atoi(rawCount) + if err != nil { + return 0, fmt.Errorf("parse AUTH_USER_SYNC_SMOKE_COUNT: %w", err) + } + if count < 1 { + return 0, fmt.Errorf("AUTH_USER_SYNC_SMOKE_COUNT must be at least 1") + } + + return count, nil +} + +func loadWait() (time.Duration, error) { + rawWait := strings.TrimSpace(os.Getenv("AUTH_USER_SYNC_SMOKE_WAIT")) + if rawWait == "" { + return defaultWait, nil + } + + wait, err := time.ParseDuration(rawWait) + if err != nil { + return 0, fmt.Errorf("parse AUTH_USER_SYNC_SMOKE_WAIT: %w", err) + } + if wait <= 0 { + return 0, fmt.Errorf("AUTH_USER_SYNC_SMOKE_WAIT must be greater than 0") + } + + return wait, nil +} + +func newAuthUsers(count int) []authUser { + runID := strings.ReplaceAll(uuid.NewString(), "-", "") + users := make([]authUser, 0, count) + + for i := range count { + userID := uuid.New() + email := fmt.Sprintf("auth-sync-smoke-%s-%02d@example.com", runID[:12], i+1) + users = append(users, authUser{ + ID: userID, + Email: email, + }) + } + + return users +} + +func cleanupInsertedUsers(ctx context.Context, db *authdb.Client, users []authUser) error { + cleanupCtx, cancel := context.WithTimeout(context.WithoutCancel(ctx), 10*time.Second) + defer cancel() + + return deleteUsers(cleanupCtx, db, users) +} + +func insertUsers(ctx context.Context, db *authdb.Client, users []authUser, insertedUsers *[]authUser) error { + for _, user := range users { + err := db.TestsRawSQL(ctx, ` 
+INSERT INTO auth.users (id, email) +VALUES ($1, $2) +`, user.ID, user.Email) + if err != nil { + return fmt.Errorf("insert auth user %s: %w", user.Email, err) + } + + *insertedUsers = append(*insertedUsers, user) + } + + return nil +} + +func deleteUsers(ctx context.Context, db *authdb.Client, users []authUser) error { + for _, user := range users { + err := db.TestsRawSQL(ctx, ` +DELETE FROM auth.users +WHERE id = $1 +`, user.ID) + if err != nil { + return fmt.Errorf("delete auth user %s: %w", user.Email, err) + } + } + + return nil +} From eb69678058f7258ada2feb6fe686fdced8c7f6c7 Mon Sep 17 00:00:00 2001 From: ben-fornefeld Date: Tue, 31 Mar 2026 13:19:05 -0700 Subject: [PATCH 06/20] chore: remove smoke test --- packages/db/Makefile | 7 - .../db/scripts/auth-user-sync-smoke/main.go | 219 ------------------ 2 files changed, 226 deletions(-) delete mode 100644 packages/db/scripts/auth-user-sync-smoke/main.go diff --git a/packages/db/Makefile b/packages/db/Makefile index acd52c6d0e..e5f1d67439 100644 --- a/packages/db/Makefile +++ b/packages/db/Makefile @@ -5,9 +5,6 @@ PREFIX := $(strip $(subst ",,$(PREFIX))) goose := GOOSE_DRIVER=postgres GOOSE_DBSTRING=$(POSTGRES_CONNECTION_STRING) go tool goose -table "_migrations" -dir "migrations" goose-local := GOOSE_DBSTRING=postgres://postgres:postgres@localhost:5432/postgres?sslmode=disable go tool goose -table "_migrations" -dir "migrations" postgres -AUTH_USER_SYNC_SMOKE_COUNT ?= 15 -AUTH_USER_SYNC_SMOKE_WAIT ?= 10s - .PHONY: migrate migrate: @@ -64,7 +61,3 @@ test: build-tools seed-db: @echo "Seeding database..." 
@POSTGRES_CONNECTION_STRING=$(POSTGRES_CONNECTION_STRING) go run ./scripts/seed/postgres/seed-db.go - -.PHONY: auth-user-sync-smoke -auth-user-sync-smoke: - @AUTH_DB_CONNECTION_STRING="$(AUTH_DB_CONNECTION_STRING)" POSTGRES_CONNECTION_STRING="$(POSTGRES_CONNECTION_STRING)" AUTH_USER_SYNC_SMOKE_COUNT="$(AUTH_USER_SYNC_SMOKE_COUNT)" AUTH_USER_SYNC_SMOKE_WAIT="$(AUTH_USER_SYNC_SMOKE_WAIT)" go run ./scripts/auth-user-sync-smoke diff --git a/packages/db/scripts/auth-user-sync-smoke/main.go b/packages/db/scripts/auth-user-sync-smoke/main.go deleted file mode 100644 index 6ba5cb807a..0000000000 --- a/packages/db/scripts/auth-user-sync-smoke/main.go +++ /dev/null @@ -1,219 +0,0 @@ -package main - -import ( - "context" - "fmt" - "os" - "os/signal" - "strconv" - "strings" - "syscall" - "time" - - "github.com/google/uuid" - - authdb "github.com/e2b-dev/infra/packages/db/pkg/auth" -) - -const ( - defaultCount = 1 - defaultWait = 30 * time.Second -) - -type config struct { - ConnectionString string - Count int - Wait time.Duration -} - -type authUser struct { - ID uuid.UUID - Email string -} - -func main() { - ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM) - err := run(ctx) - stop() - - if err != nil { - fmt.Fprintf(os.Stderr, "auth user sync smoke failed: %v\n", err) - os.Exit(1) - } -} - -func run(ctx context.Context) error { - cfg, err := loadConfig() - if err != nil { - return err - } - - db, err := authdb.NewClient(ctx, cfg.ConnectionString, "") - if err != nil { - return fmt.Errorf("create auth db client: %w", err) - } - defer func() { - if closeErr := db.Close(); closeErr != nil { - fmt.Fprintf(os.Stderr, "close auth db client: %v\n", closeErr) - } - }() - - users := newAuthUsers(cfg.Count) - insertedUsers := make([]authUser, 0, len(users)) - - defer func() { - if len(insertedUsers) == 0 { - return - } - - if cleanupErr := cleanupInsertedUsers(ctx, db, insertedUsers); cleanupErr != nil { - fmt.Fprintf(os.Stderr, "cleanup auth 
users: %v\n", cleanupErr) - - return - } - - fmt.Fprintf(os.Stdout, "cleaned up %d auth.users rows\n", len(insertedUsers)) - }() - - if err := insertUsers(ctx, db, users, &insertedUsers); err != nil { - return err - } - - fmt.Fprintf(os.Stdout, "created %d auth.users rows\n", len(insertedUsers)) - for _, user := range insertedUsers { - fmt.Fprintf(os.Stdout, " %s %s\n", user.ID.String(), user.Email) - } - - fmt.Fprintf(os.Stdout, "waiting %s before delete\n", cfg.Wait) - - timer := time.NewTimer(cfg.Wait) - defer timer.Stop() - - select { - case <-ctx.Done(): - return ctx.Err() - case <-timer.C: - } - - if err := cleanupInsertedUsers(ctx, db, insertedUsers); err != nil { - return fmt.Errorf("delete auth users: %w", err) - } - - insertedUsers = nil - fmt.Fprintln(os.Stdout, "deleted auth.users rows") - - return nil -} - -func loadConfig() (config, error) { - connectionString := strings.TrimSpace(os.Getenv("AUTH_DB_CONNECTION_STRING")) - if connectionString == "" { - connectionString = strings.TrimSpace(os.Getenv("POSTGRES_CONNECTION_STRING")) - } - if connectionString == "" { - return config{}, fmt.Errorf("AUTH_DB_CONNECTION_STRING or POSTGRES_CONNECTION_STRING must be set") - } - - count, err := loadCount() - if err != nil { - return config{}, err - } - - wait, err := loadWait() - if err != nil { - return config{}, err - } - - return config{ - ConnectionString: connectionString, - Count: count, - Wait: wait, - }, nil -} - -func loadCount() (int, error) { - rawCount := strings.TrimSpace(os.Getenv("AUTH_USER_SYNC_SMOKE_COUNT")) - if rawCount == "" { - return defaultCount, nil - } - - count, err := strconv.Atoi(rawCount) - if err != nil { - return 0, fmt.Errorf("parse AUTH_USER_SYNC_SMOKE_COUNT: %w", err) - } - if count < 1 { - return 0, fmt.Errorf("AUTH_USER_SYNC_SMOKE_COUNT must be at least 1") - } - - return count, nil -} - -func loadWait() (time.Duration, error) { - rawWait := strings.TrimSpace(os.Getenv("AUTH_USER_SYNC_SMOKE_WAIT")) - if rawWait == "" { - return 
defaultWait, nil - } - - wait, err := time.ParseDuration(rawWait) - if err != nil { - return 0, fmt.Errorf("parse AUTH_USER_SYNC_SMOKE_WAIT: %w", err) - } - if wait <= 0 { - return 0, fmt.Errorf("AUTH_USER_SYNC_SMOKE_WAIT must be greater than 0") - } - - return wait, nil -} - -func newAuthUsers(count int) []authUser { - runID := strings.ReplaceAll(uuid.NewString(), "-", "") - users := make([]authUser, 0, count) - - for i := range count { - userID := uuid.New() - email := fmt.Sprintf("auth-sync-smoke-%s-%02d@example.com", runID[:12], i+1) - users = append(users, authUser{ - ID: userID, - Email: email, - }) - } - - return users -} - -func cleanupInsertedUsers(ctx context.Context, db *authdb.Client, users []authUser) error { - cleanupCtx, cancel := context.WithTimeout(context.WithoutCancel(ctx), 10*time.Second) - defer cancel() - - return deleteUsers(cleanupCtx, db, users) -} - -func insertUsers(ctx context.Context, db *authdb.Client, users []authUser, insertedUsers *[]authUser) error { - for _, user := range users { - err := db.TestsRawSQL(ctx, ` -INSERT INTO auth.users (id, email) -VALUES ($1, $2) -`, user.ID, user.Email) - if err != nil { - return fmt.Errorf("insert auth user %s: %w", user.Email, err) - } - - *insertedUsers = append(*insertedUsers, user) - } - - return nil -} - -func deleteUsers(ctx context.Context, db *authdb.Client, users []authUser) error { - for _, user := range users { - err := db.TestsRawSQL(ctx, ` -DELETE FROM auth.users -WHERE id = $1 -`, user.ID) - if err != nil { - return fmt.Errorf("delete auth user %s: %w", user.Email, err) - } - } - - return nil -} From bb9fa39df3ab25cc5c97fc1da4771d43ca006296 Mon Sep 17 00:00:00 2001 From: ben-fornefeld Date: Tue, 31 Mar 2026 13:49:02 -0700 Subject: [PATCH 07/20] add: e2e runner test --- .../supabaseauthusersync/runner_test.go | 579 ++++++++++++------ 1 file changed, 377 insertions(+), 202 deletions(-) diff --git a/packages/dashboard-api/internal/supabaseauthusersync/runner_test.go 
b/packages/dashboard-api/internal/supabaseauthusersync/runner_test.go index b578185745..1429c2a605 100644 --- a/packages/dashboard-api/internal/supabaseauthusersync/runner_test.go +++ b/packages/dashboard-api/internal/supabaseauthusersync/runner_test.go @@ -1,10 +1,9 @@ package supabaseauthusersync import ( - "os/exec" - "path/filepath" - "strings" - "sync/atomic" + "context" + "fmt" + "sync" "testing" "time" @@ -17,284 +16,460 @@ import ( "github.com/e2b-dev/infra/packages/shared/pkg/logger" ) -func setupTestDB(t *testing.T) *testutils.Database { - t.Helper() +const ( + testRunnerPollInterval = 20 * time.Millisecond + testRunnerLockTimeout = 150 * time.Millisecond + testEventuallyTimeout = 8 * time.Second + testEventuallyTick = 25 * time.Millisecond + testRunnerStopTimeout = 2 * time.Second +) + +type runnerProcess struct { + cancel context.CancelFunc + done chan error + stopOnce sync.Once +} + +type userExpectation struct { + Email string + Exists bool +} + +type queueSnapshot struct { + Total int + DeadLettered int +} +func TestSupabaseAuthUserSyncRunner_EndToEnd(t *testing.T) { db := testutils.SetupDatabase(t) - repoRoot := gitRoot(t) - migrationSQL := readFile(t, filepath.Join( - repoRoot, - "packages", "db", "pkg", "dashboard", "migrations", - "20260328000000_dashboard_supabase_auth_user_sync_queue.sql", - )) + t.Run("repairs_insert_update_delete_drift", func(t *testing.T) { + ctx := t.Context() + userID := uuid.New() + initialEmail := fmt.Sprintf("auth-sync-%s-initial@example.com", userID.String()[:8]) + updatedEmail := fmt.Sprintf("auth-sync-%s-updated@example.com", userID.String()[:8]) + + insertAuthUser(t, ctx, db, userID, initialEmail) + deletePublicUser(t, ctx, db, userID) + assertQueueBacklog(t, ctx, db, 1) + + insertRunner := startRunnerProcess(t, db, newTestRunnerConfig(4), "repair-insert") + t.Cleanup(func() { + insertRunner.Stop(t) + }) + waitForPublicUsers(t, ctx, db, map[uuid.UUID]userExpectation{ + userID: { + Email: initialEmail, + Exists: 
true, + }, + }) + waitForQueueDrain(t, ctx, db) + insertRunner.Stop(t) + + updateAuthUserEmail(t, ctx, db, userID, updatedEmail) + setPublicUserEmail(t, ctx, db, userID, "stale@example.com") + assertQueueBacklog(t, ctx, db, 1) + + updateRunner := startRunnerProcess(t, db, newTestRunnerConfig(4), "repair-update") + t.Cleanup(func() { + updateRunner.Stop(t) + }) + waitForPublicUsers(t, ctx, db, map[uuid.UUID]userExpectation{ + userID: { + Email: updatedEmail, + Exists: true, + }, + }) + waitForQueueDrain(t, ctx, db) + updateRunner.Stop(t) + + deleteAuthUser(t, ctx, db, userID) + insertPublicUser(t, ctx, db, userID, "ghost@example.com") + assertQueueBacklog(t, ctx, db, 1) + + deleteRunner := startRunnerProcess(t, db, newTestRunnerConfig(4), "repair-delete") + t.Cleanup(func() { + deleteRunner.Stop(t) + }) + waitForPublicUsers(t, ctx, db, map[uuid.UUID]userExpectation{ + userID: { + Exists: false, + }, + }) + waitForQueueDrain(t, ctx, db) + deleteRunner.Stop(t) + }) + + t.Run("reclaims_stale_queue_locks", func(t *testing.T) { + ctx := t.Context() + userID := uuid.New() + email := fmt.Sprintf("auth-sync-%s-locked@example.com", userID.String()[:8]) + + insertAuthUser(t, ctx, db, userID, email) + deletePublicUser(t, ctx, db, userID) + lockQueueItems(t, ctx, db, userID, time.Now().Add(-time.Minute), "stale-worker") + assertQueueBacklog(t, ctx, db, 1) + + runner := startRunnerProcess(t, db, newTestRunnerConfig(2), "lock-reclaimer") + t.Cleanup(func() { + runner.Stop(t) + }) + + waitForPublicUsers(t, ctx, db, map[uuid.UUID]userExpectation{ + userID: { + Email: email, + Exists: true, + }, + }) + waitForQueueDrain(t, ctx, db) + runner.Stop(t) + }) + + t.Run("drains_burst_backlog_with_multiple_runners", func(t *testing.T) { + ctx := t.Context() + const userCount = 60 + + userIDs := make([]uuid.UUID, 0, userCount) + + for i := 0; i < userCount; i++ { + userID := uuid.New() + userIDs = append(userIDs, userID) + + initialEmail := 
fmt.Sprintf("auth-sync-burst-%02d-initial@example.com", i) + insertAuthUser(t, ctx, db, userID, initialEmail) + + if i%2 == 0 { + updateAuthUserEmail(t, ctx, db, userID, fmt.Sprintf("auth-sync-burst-%02d-v2@example.com", i)) + } + if i%5 == 0 { + updateAuthUserEmail(t, ctx, db, userID, fmt.Sprintf("auth-sync-burst-%02d-v3@example.com", i)) + } - upSQL := extractGooseUp(migrationSQL) - err := db.AuthDb.TestsRawSQL(t.Context(), upSQL) - require.NoError(t, err, "failed to apply dashboard auth sync migration") + if i%3 == 0 { + deleteAuthUser(t, ctx, db, userID) + enqueueUserSyncItem(t, ctx, db, userID, "delete") + if i%6 == 0 { + insertPublicUser(t, ctx, db, userID, fmt.Sprintf("ghost-%02d@example.com", i)) + } - return db -} + continue + } -func gitRoot(t *testing.T) string { - t.Helper() + if i%8 == 0 { + deletePublicUser(t, ctx, db, userID) + } else if i%7 == 0 { + setPublicUserEmail(t, ctx, db, userID, fmt.Sprintf("stale-%02d@example.com", i)) + } - cmd := exec.CommandContext(t.Context(), "git", "rev-parse", "--show-toplevel") - output, err := cmd.Output() - require.NoError(t, err) + if i%4 == 0 { + enqueueUserSyncItem(t, ctx, db, userID, "upsert") + } + if i%9 == 0 { + enqueueUserSyncItem(t, ctx, db, userID, "upsert") + } + } - return strings.TrimSpace(string(output)) -} + authUsers, err := loadAuthUsers(ctx, db) + require.NoError(t, err) -func readFile(t *testing.T, path string) string { - t.Helper() + want := expectedUsersForIDs(userIDs, authUsers) + assertQueueBacklog(t, ctx, db, userCount) - cmd := exec.CommandContext(t.Context(), "cat", path) - output, err := cmd.Output() - require.NoError(t, err) + runnerA := startRunnerProcess(t, db, newTestRunnerConfig(5), "burst-a") + runnerB := startRunnerProcess(t, db, newTestRunnerConfig(5), "burst-b") + t.Cleanup(func() { + runnerA.Stop(t) + runnerB.Stop(t) + }) - return string(output) + waitForPublicUsers(t, ctx, db, want) + waitForQueueDrain(t, ctx, db) + + runnerA.Stop(t) + runnerB.Stop(t) + }) } -func 
extractGooseUp(sql string) string { - parts := strings.SplitN(sql, "-- +goose Down", 2) - up := parts[0] - up = strings.ReplaceAll(up, "-- +goose Up", "") - up = strings.ReplaceAll(up, "-- +goose StatementBegin", "") - up = strings.ReplaceAll(up, "-- +goose StatementEnd", "") +func newTestRunnerConfig(batchSize int32) Config { + cfg := DefaultConfig() + cfg.Enabled = true + cfg.BatchSize = batchSize + cfg.PollInterval = testRunnerPollInterval + cfg.LockTimeout = testRunnerLockTimeout + cfg.MaxAttempts = 5 - return up + return cfg } -func insertAuthUser(t *testing.T, db *testutils.Database, userID uuid.UUID, email string) { +func startRunnerProcess(t *testing.T, db *testutils.Database, cfg Config, lockOwner string) *runnerProcess { t.Helper() - err := db.AuthDb.TestsRawSQL(t.Context(), - "INSERT INTO auth.users (id, email) VALUES ($1, $2)", userID, email) - require.NoError(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + done := make(chan error, 1) + runner := NewRunner(cfg, NewStore(db.SqlcClient.Queries), lockOwner, logger.NewNopLogger()) + + go func() { + done <- runner.Run(ctx) + }() + + return &runnerProcess{ + cancel: cancel, + done: done, + } } -func updateAuthUserEmail(t *testing.T, db *testutils.Database, userID uuid.UUID, email string) { +func (p *runnerProcess) Stop(t *testing.T) { t.Helper() - err := db.AuthDb.TestsRawSQL(t.Context(), - "UPDATE auth.users SET email = $1 WHERE id = $2", email, userID) - require.NoError(t, err) + + p.stopOnce.Do(func() { + p.cancel() + + select { + case err := <-p.done: + require.ErrorIs(t, err, context.Canceled) + case <-time.After(testRunnerStopTimeout): + t.Fatalf("runner did not stop within %s", testRunnerStopTimeout) + } + }) } -func deleteAuthUser(t *testing.T, db *testutils.Database, userID uuid.UUID) { +func insertAuthUser(t *testing.T, ctx context.Context, db *testutils.Database, userID uuid.UUID, email string) { t.Helper() - err := db.AuthDb.TestsRawSQL(t.Context(), - "DELETE FROM auth.users 
WHERE id = $1", userID) + + err := db.AuthDb.TestsRawSQL(ctx, + "INSERT INTO auth.users (id, email) VALUES ($1, $2)", + userID, + email, + ) require.NoError(t, err) } -func getPublicUserEmail(t *testing.T, db *testutils.Database, userID uuid.UUID) (string, bool) { +func updateAuthUserEmail(t *testing.T, ctx context.Context, db *testutils.Database, userID uuid.UUID, email string) { t.Helper() - var email string - var found bool - - err := db.AuthDb.TestsRawSQLQuery(t.Context(), - "SELECT email FROM public.users WHERE id = $1", - func(rows pgx.Rows) error { - if rows.Next() { - found = true - return rows.Scan(&email) - } - return nil - }, + err := db.AuthDb.TestsRawSQL(ctx, + "UPDATE auth.users SET email = $1 WHERE id = $2", + email, userID, ) require.NoError(t, err) - - return email, found } -func queueDepth(t *testing.T, db *testutils.Database) int { +func deleteAuthUser(t *testing.T, ctx context.Context, db *testutils.Database, userID uuid.UUID) { t.Helper() - var count int - - err := db.AuthDb.TestsRawSQLQuery(t.Context(), - "SELECT count(*) FROM public.user_sync_queue WHERE dead_lettered_at IS NULL", - func(rows pgx.Rows) error { - if rows.Next() { - return rows.Scan(&count) - } - return nil - }, + err := db.AuthDb.TestsRawSQL(ctx, + "DELETE FROM auth.users WHERE id = $1", + userID, ) require.NoError(t, err) - - return count } -func TestInsertAuthUserCreatesQueueRow(t *testing.T) { - if testing.Short() { - t.Skip("skipping integration test") - } - - db := setupTestDB(t) - - userID := uuid.New() - insertAuthUser(t, db, userID, "test@example.com") +func deletePublicUser(t *testing.T, ctx context.Context, db *testutils.Database, userID uuid.UUID) { + t.Helper() - depth := queueDepth(t, db) - assert.Equal(t, 1, depth) + err := db.AuthDb.TestsRawSQL(ctx, + "DELETE FROM public.users WHERE id = $1", + userID, + ) + require.NoError(t, err) } -func TestProcessorReconciles_Insert(t *testing.T) { - if testing.Short() { - t.Skip("skipping integration test") - } - - db := 
setupTestDB(t) - store := NewStore(db.SqlcClient.Queries) - l := logger.NewNopLogger() - proc := NewProcessor(store, 5, l) - - userID := uuid.New() - insertAuthUser(t, db, userID, "alice@example.com") +func insertPublicUser(t *testing.T, ctx context.Context, db *testutils.Database, userID uuid.UUID, email string) { + t.Helper() - items, err := store.ClaimBatch(t.Context(), "test-worker", 2*time.Minute, 10) + err := db.AuthDb.TestsRawSQL(ctx, ` +INSERT INTO public.users (id, email) +VALUES ($1, $2) +ON CONFLICT (id) DO UPDATE +SET email = EXCLUDED.email, + updated_at = now() +`, + userID, + email, + ) require.NoError(t, err) - require.Len(t, items, 1) - - proc.Process(t.Context(), items[0]) - - email, found := getPublicUserEmail(t, db, userID) - assert.True(t, found) - assert.Equal(t, "alice@example.com", email) - - assert.Equal(t, 0, queueDepth(t, db)) } -func TestProcessorReconciles_UpdateEmail(t *testing.T) { - if testing.Short() { - t.Skip("skipping integration test") - } +func setPublicUserEmail(t *testing.T, ctx context.Context, db *testutils.Database, userID uuid.UUID, email string) { + t.Helper() - db := setupTestDB(t) - store := NewStore(db.SqlcClient.Queries) - l := logger.NewNopLogger() - proc := NewProcessor(store, 5, l) + err := db.AuthDb.TestsRawSQL(ctx, + "UPDATE public.users SET email = $1, updated_at = now() WHERE id = $2", + email, + userID, + ) + require.NoError(t, err) +} - userID := uuid.New() - insertAuthUser(t, db, userID, "old@example.com") +func enqueueUserSyncItem(t *testing.T, ctx context.Context, db *testutils.Database, userID uuid.UUID, operation string) { + t.Helper() - items, err := store.ClaimBatch(t.Context(), "test-worker", 2*time.Minute, 10) + err := db.AuthDb.TestsRawSQL(ctx, + "INSERT INTO public.user_sync_queue (user_id, operation) VALUES ($1, $2)", + userID, + operation, + ) require.NoError(t, err) - proc.Process(t.Context(), items[0]) +} - updateAuthUserEmail(t, db, userID, "new@example.com") +func lockQueueItems(t *testing.T, 
ctx context.Context, db *testutils.Database, userID uuid.UUID, lockedAt time.Time, lockOwner string) { + t.Helper() - items, err = store.ClaimBatch(t.Context(), "test-worker", 2*time.Minute, 10) + err := db.AuthDb.TestsRawSQL(ctx, ` +UPDATE public.user_sync_queue +SET locked_at = $2, + lock_owner = $3 +WHERE user_id = $1 +`, + userID, + lockedAt, + lockOwner, + ) require.NoError(t, err) - require.Len(t, items, 1) - proc.Process(t.Context(), items[0]) - - email, found := getPublicUserEmail(t, db, userID) - assert.True(t, found) - assert.Equal(t, "new@example.com", email) } -func TestProcessorReconciles_Delete(t *testing.T) { - if testing.Short() { - t.Skip("skipping integration test") - } +func loadPublicUsers(ctx context.Context, db *testutils.Database) (map[uuid.UUID]string, error) { + users := make(map[uuid.UUID]string) - db := setupTestDB(t) - store := NewStore(db.SqlcClient.Queries) - l := logger.NewNopLogger() - proc := NewProcessor(store, 5, l) + err := db.AuthDb.TestsRawSQLQuery(ctx, + "SELECT id, email FROM public.users", + func(rows pgx.Rows) error { + for rows.Next() { + var userID uuid.UUID + var email string + if err := rows.Scan(&userID, &email); err != nil { + return err + } + + users[userID] = email + } - userID := uuid.New() - insertAuthUser(t, db, userID, "doomed@example.com") + return rows.Err() + }, + ) + if err != nil { + return nil, err + } - items, err := store.ClaimBatch(t.Context(), "test-worker", 2*time.Minute, 10) - require.NoError(t, err) - proc.Process(t.Context(), items[0]) + return users, nil +} - _, found := getPublicUserEmail(t, db, userID) - require.True(t, found) +func loadAuthUsers(ctx context.Context, db *testutils.Database) (map[uuid.UUID]string, error) { + users := make(map[uuid.UUID]string) - deleteAuthUser(t, db, userID) + err := db.AuthDb.TestsRawSQLQuery(ctx, + "SELECT id, email FROM auth.users", + func(rows pgx.Rows) error { + for rows.Next() { + var userID uuid.UUID + var email string + if err := rows.Scan(&userID, 
&email); err != nil { + return err + } + + users[userID] = email + } - items, err = store.ClaimBatch(t.Context(), "test-worker", 2*time.Minute, 10) - require.NoError(t, err) - require.Len(t, items, 1) - proc.Process(t.Context(), items[0]) + return rows.Err() + }, + ) + if err != nil { + return nil, err + } - _, found = getPublicUserEmail(t, db, userID) - assert.False(t, found) + return users, nil } -func TestDuplicateQueueRowsConverge(t *testing.T) { - if testing.Short() { - t.Skip("skipping integration test") - } +func loadQueueSnapshot(ctx context.Context, db *testutils.Database) (queueSnapshot, error) { + var snapshot queueSnapshot - db := setupTestDB(t) - store := NewStore(db.SqlcClient.Queries) - l := logger.NewNopLogger() - proc := NewProcessor(store, 5, l) + err := db.AuthDb.TestsRawSQLQuery(ctx, ` +SELECT + count(*)::int AS total, + count(*) FILTER (WHERE dead_lettered_at IS NOT NULL)::int AS dead_lettered +FROM public.user_sync_queue +`, + func(rows pgx.Rows) error { + if !rows.Next() { + return nil + } - userID := uuid.New() - insertAuthUser(t, db, userID, "dup@example.com") + return rows.Scan(&snapshot.Total, &snapshot.DeadLettered) + }, + ) + if err != nil { + return queueSnapshot{}, err + } - err := db.AuthDb.TestsRawSQL(t.Context(), - "INSERT INTO public.user_sync_queue (user_id, operation) VALUES ($1, 'upsert')", - userID) - require.NoError(t, err) + return snapshot, nil +} - items, err := store.ClaimBatch(t.Context(), "test-worker", 2*time.Minute, 10) - require.NoError(t, err) - assert.GreaterOrEqual(t, len(items), 2) +func expectedUsersForIDs(userIDs []uuid.UUID, authUsers map[uuid.UUID]string) map[uuid.UUID]userExpectation { + want := make(map[uuid.UUID]userExpectation, len(userIDs)) - for _, item := range items { - proc.Process(t.Context(), item) + for _, userID := range userIDs { + email, ok := authUsers[userID] + want[userID] = userExpectation{ + Email: email, + Exists: ok, + } } - email, found := getPublicUserEmail(t, db, userID) - 
assert.True(t, found) - assert.Equal(t, "dup@example.com", email) - assert.Equal(t, 0, queueDepth(t, db)) + return want } -func TestMultiInstanceClaimNoDoubleProcessing(t *testing.T) { - if testing.Short() { - t.Skip("skipping integration test") - } +func assertQueueBacklog(t *testing.T, ctx context.Context, db *testutils.Database, minimum int) { + t.Helper() - db := setupTestDB(t) + snapshot, err := loadQueueSnapshot(ctx, db) + require.NoError(t, err) + require.GreaterOrEqual(t, snapshot.Total, minimum) +} - for i := range 10 { - userID := uuid.New() - insertAuthUser(t, db, userID, "user"+string(rune('a'+i))+"@example.com") - } +func waitForQueueDrain(t *testing.T, ctx context.Context, db *testutils.Database) { + t.Helper() - store1 := NewStore(db.SqlcClient.Queries) - store2 := NewStore(db.SqlcClient.Queries) + require.EventuallyWithT(t, func(c *assert.CollectT) { + snapshot, err := loadQueueSnapshot(ctx, db) + if !assert.NoError(c, err) { + return + } - var claimed1, claimed2 atomic.Int32 + assert.Equal(c, 0, snapshot.Total) + assert.Equal(c, 0, snapshot.DeadLettered) + }, testEventuallyTimeout, testEventuallyTick) +} - ctx := t.Context() +func waitForPublicUsers(t *testing.T, ctx context.Context, db *testutils.Database, want map[uuid.UUID]userExpectation) { + t.Helper() - items1, err := store1.ClaimBatch(ctx, "worker-1", 2*time.Minute, 10) - require.NoError(t, err) - claimed1.Store(int32(len(items1))) + require.EventuallyWithT(t, func(c *assert.CollectT) { + got, err := loadPublicUsers(ctx, db) + if !assert.NoError(c, err) { + return + } - items2, err := store2.ClaimBatch(ctx, "worker-2", 2*time.Minute, 10) - require.NoError(t, err) - claimed2.Store(int32(len(items2))) + var gotExisting int + var wantExisting int - total := claimed1.Load() + claimed2.Load() - assert.Equal(t, int32(10), total, "all items should be claimed exactly once across both workers") + for userID, expectation := range want { + email, ok := got[userID] + if ok { + gotExisting++ + } + if 
expectation.Exists { + wantExisting++ + } - ids := make(map[int64]bool) - for _, item := range items1 { - ids[item.ID] = true - } - for _, item := range items2 { - assert.False(t, ids[item.ID], "item %d claimed by both workers", item.ID) - } + if !assert.Equalf(c, expectation.Exists, ok, "public.users presence for %s", userID) { + continue + } + if expectation.Exists { + assert.Equalf(c, expectation.Email, email, "public.users email for %s", userID) + } + } + + assert.Equal(c, wantExisting, gotExisting) + }, testEventuallyTimeout, testEventuallyTick) } From 461d2ecaadce1b56a51939c17b405005309f671f Mon Sep 17 00:00:00 2001 From: ben-fornefeld Date: Tue, 31 Mar 2026 14:20:01 -0700 Subject: [PATCH 08/20] chore: change dashboard-api env variable management --- .env.gcp.template | 3 + .../job-dashboard-api/jobs/dashboard-api.hcl | 5 +- iac/modules/job-dashboard-api/main.tf | 6 +- iac/modules/job-dashboard-api/variables.tf | 5 +- iac/provider-gcp/Makefile | 1 + iac/provider-gcp/api.tf | 17 ------ iac/provider-gcp/main.tf | 56 +++++++++---------- iac/provider-gcp/nomad/main.tf | 6 +- iac/provider-gcp/nomad/variables.tf | 9 +-- iac/provider-gcp/variables.tf | 5 ++ 10 files changed, 55 insertions(+), 58 deletions(-) diff --git a/.env.gcp.template b/.env.gcp.template index 6c5bced521..6f72d2d77c 100644 --- a/.env.gcp.template +++ b/.env.gcp.template @@ -77,6 +77,9 @@ CLICKHOUSE_CLUSTER_SIZE=1 # Dashboard API instance count (default: 0) DASHBOARD_API_COUNT= +# Additional dashboard-api env vars passed directly to the Nomad job (default: {}) +# Example: '{"SUPABASE_AUTH_USER_SYNC_ENABLED":"true"}' +DASHBOARD_API_ENV_VARS= # Filestore cache for builds shared across cluster (default:false) FILESTORE_CACHE_ENABLED= diff --git a/iac/modules/job-dashboard-api/jobs/dashboard-api.hcl b/iac/modules/job-dashboard-api/jobs/dashboard-api.hcl index 7bdaca2f24..86a72cab50 100644 --- a/iac/modules/job-dashboard-api/jobs/dashboard-api.hcl +++ 
b/iac/modules/job-dashboard-api/jobs/dashboard-api.hcl @@ -80,9 +80,12 @@ job "dashboard-api" { AUTH_DB_READ_REPLICA_CONNECTION_STRING = "${auth_db_read_replica_connection_string}" CLICKHOUSE_CONNECTION_STRING = "${clickhouse_connection_string}" SUPABASE_JWT_SECRETS = "${supabase_jwt_secrets}" - SUPABASE_AUTH_USER_SYNC_ENABLED = "${supabase_auth_user_sync_enabled}" OTEL_COLLECTOR_GRPC_ENDPOINT = "${otel_collector_grpc_endpoint}" LOGS_COLLECTOR_ADDRESS = "${logs_collector_address}" + + %{ for key, val in env } + ${ key } = "${ val }" + %{ endfor } } config { diff --git a/iac/modules/job-dashboard-api/main.tf b/iac/modules/job-dashboard-api/main.tf index 482ae19d9c..aa577bfd2f 100644 --- a/iac/modules/job-dashboard-api/main.tf +++ b/iac/modules/job-dashboard-api/main.tf @@ -1,3 +1,7 @@ +locals { + env = { for key, value in var.env : key => value if value != null && value != "" } +} + resource "nomad_job" "dashboard_api" { jobspec = templatefile("${path.module}/jobs/dashboard-api.hcl", { update_stanza = var.update_stanza @@ -15,7 +19,7 @@ resource "nomad_job" "dashboard_api" { auth_db_read_replica_connection_string = var.auth_db_read_replica_connection_string clickhouse_connection_string = var.clickhouse_connection_string supabase_jwt_secrets = var.supabase_jwt_secrets - supabase_auth_user_sync_enabled = var.supabase_auth_user_sync_enabled + env = local.env subdomain = "dashboard-api" diff --git a/iac/modules/job-dashboard-api/variables.tf b/iac/modules/job-dashboard-api/variables.tf index 69d16a0419..625da7c82b 100644 --- a/iac/modules/job-dashboard-api/variables.tf +++ b/iac/modules/job-dashboard-api/variables.tf @@ -44,8 +44,9 @@ variable "supabase_jwt_secrets" { sensitive = true } -variable "supabase_auth_user_sync_enabled" { - type = string +variable "env" { + type = map(string) + default = {} } variable "otel_collector_grpc_port" { diff --git a/iac/provider-gcp/Makefile b/iac/provider-gcp/Makefile index f1a44e7975..a8f2db309c 100644 --- 
a/iac/provider-gcp/Makefile +++ b/iac/provider-gcp/Makefile @@ -75,6 +75,7 @@ tf_vars := \ $(call tfvar, LOKI_BOOT_DISK_TYPE) \ $(call tfvar, LOKI_USE_V13_SCHEMA_FROM) \ $(call tfvar, DASHBOARD_API_COUNT) \ + $(call tfvar, DASHBOARD_API_ENV_VARS) \ $(call tfvar, DEFAULT_PERSISTENT_VOLUME_TYPE) \ $(call tfvar, PERSISTENT_VOLUME_TYPES) \ $(call tfvar, DB_MAX_OPEN_CONNECTIONS) \ diff --git a/iac/provider-gcp/api.tf b/iac/provider-gcp/api.tf index 736b322090..c8b5586c96 100644 --- a/iac/provider-gcp/api.tf +++ b/iac/provider-gcp/api.tf @@ -44,23 +44,6 @@ resource "google_secret_manager_secret_version" "auth_db_connection_string" { } } -resource "google_secret_manager_secret" "dashboard_api_supabase_auth_user_sync_enabled" { - secret_id = "${var.prefix}dashboard-api-supabase-auth-user-sync-enabled" - - replication { - auto {} - } -} - -resource "google_secret_manager_secret_version" "dashboard_api_supabase_auth_user_sync_enabled" { - secret = google_secret_manager_secret.dashboard_api_supabase_auth_user_sync_enabled.name - secret_data = "false" - - lifecycle { - ignore_changes = [secret_data] - } -} - resource "random_password" "api_secret" { length = 32 special = false diff --git a/iac/provider-gcp/main.tf b/iac/provider-gcp/main.tf index 32da66604f..90294e554b 100644 --- a/iac/provider-gcp/main.tf +++ b/iac/provider-gcp/main.tf @@ -213,33 +213,32 @@ module "nomad" { additional_traefik_arguments = var.additional_traefik_arguments # API - api_server_count = var.api_server_count - api_resources_cpu_count = var.api_resources_cpu_count - api_resources_memory_mb = var.api_resources_memory_mb - api_machine_count = var.api_cluster_size - api_node_pool = var.api_node_pool - api_port = var.api_port - environment = var.environment - google_service_account_key = module.init.google_service_account_key - api_secret = random_password.api_secret.result - custom_envs_repository_name = google_artifact_registry_repository.custom_environments_repository.name - 
postgres_connection_string_secret_name = module.init.postgres_connection_string_secret_name - auth_db_connection_string_secret_version = google_secret_manager_secret_version.auth_db_connection_string - postgres_read_replica_connection_string_secret_version = google_secret_manager_secret_version.postgres_read_replica_connection_string - supabase_jwt_secrets_secret_name = module.init.supabase_jwt_secret_name - dashboard_api_supabase_auth_user_sync_enabled_secret_version = google_secret_manager_secret_version.dashboard_api_supabase_auth_user_sync_enabled - posthog_api_key_secret_name = module.init.posthog_api_key_secret_name - analytics_collector_host_secret_name = module.init.analytics_collector_host_secret_name - analytics_collector_api_token_secret_name = module.init.analytics_collector_api_token_secret_name - api_admin_token = random_password.api_admin_secret.result - redis_cluster_url_secret_version = module.init.redis_cluster_url_secret_version - redis_tls_ca_base64_secret_version = module.init.redis_tls_ca_base64_secret_version - sandbox_access_token_hash_seed = random_password.sandbox_access_token_hash_seed.result - sandbox_storage_backend = var.sandbox_storage_backend - db_max_open_connections = var.db_max_open_connections - db_min_idle_connections = var.db_min_idle_connections - auth_db_max_open_connections = var.auth_db_max_open_connections - auth_db_min_idle_connections = var.auth_db_min_idle_connections + api_server_count = var.api_server_count + api_resources_cpu_count = var.api_resources_cpu_count + api_resources_memory_mb = var.api_resources_memory_mb + api_machine_count = var.api_cluster_size + api_node_pool = var.api_node_pool + api_port = var.api_port + environment = var.environment + google_service_account_key = module.init.google_service_account_key + api_secret = random_password.api_secret.result + custom_envs_repository_name = google_artifact_registry_repository.custom_environments_repository.name + postgres_connection_string_secret_name = 
module.init.postgres_connection_string_secret_name + auth_db_connection_string_secret_version = google_secret_manager_secret_version.auth_db_connection_string + postgres_read_replica_connection_string_secret_version = google_secret_manager_secret_version.postgres_read_replica_connection_string + supabase_jwt_secrets_secret_name = module.init.supabase_jwt_secret_name + posthog_api_key_secret_name = module.init.posthog_api_key_secret_name + analytics_collector_host_secret_name = module.init.analytics_collector_host_secret_name + analytics_collector_api_token_secret_name = module.init.analytics_collector_api_token_secret_name + api_admin_token = random_password.api_admin_secret.result + redis_cluster_url_secret_version = module.init.redis_cluster_url_secret_version + redis_tls_ca_base64_secret_version = module.init.redis_tls_ca_base64_secret_version + sandbox_access_token_hash_seed = random_password.sandbox_access_token_hash_seed.result + sandbox_storage_backend = var.sandbox_storage_backend + db_max_open_connections = var.db_max_open_connections + db_min_idle_connections = var.db_min_idle_connections + auth_db_max_open_connections = var.auth_db_max_open_connections + auth_db_min_idle_connections = var.auth_db_min_idle_connections # Click Proxy client_proxy_count = var.client_proxy_count @@ -266,7 +265,8 @@ module "nomad" { otel_collector_resources_cpu_count = var.otel_collector_resources_cpu_count # Dashboard API - dashboard_api_count = var.dashboard_api_count + dashboard_api_count = var.dashboard_api_count + dashboard_api_env_vars = var.dashboard_api_env_vars # Docker reverse proxy docker_reverse_proxy_port = var.docker_reverse_proxy_port diff --git a/iac/provider-gcp/nomad/main.tf b/iac/provider-gcp/nomad/main.tf index 4a9aae6220..7d05bf3dec 100644 --- a/iac/provider-gcp/nomad/main.tf +++ b/iac/provider-gcp/nomad/main.tf @@ -22,10 +22,6 @@ data "google_secret_manager_secret_version" "supabase_jwt_secrets" { secret = var.supabase_jwt_secrets_secret_name } -data 
"google_secret_manager_secret_version" "dashboard_api_supabase_auth_user_sync_enabled" { - secret = var.dashboard_api_supabase_auth_user_sync_enabled_secret_version.secret -} - data "google_secret_manager_secret_version" "posthog_api_key" { secret = var.posthog_api_key_secret_name } @@ -143,7 +139,7 @@ module "dashboard_api" { auth_db_read_replica_connection_string = trimspace(data.google_secret_manager_secret_version.postgres_read_replica_connection_string.secret_data) clickhouse_connection_string = local.clickhouse_connection_string supabase_jwt_secrets = trimspace(data.google_secret_manager_secret_version.supabase_jwt_secrets.secret_data) - supabase_auth_user_sync_enabled = trimspace(data.google_secret_manager_secret_version.dashboard_api_supabase_auth_user_sync_enabled.secret_data) + env = var.dashboard_api_env_vars otel_collector_grpc_port = var.otel_collector_grpc_port logs_proxy_port = var.logs_proxy_port diff --git a/iac/provider-gcp/nomad/variables.tf b/iac/provider-gcp/nomad/variables.tf index 2bbbb9494f..64e9f7dd75 100644 --- a/iac/provider-gcp/nomad/variables.tf +++ b/iac/provider-gcp/nomad/variables.tf @@ -187,10 +187,6 @@ variable "supabase_jwt_secrets_secret_name" { type = string } -variable "dashboard_api_supabase_auth_user_sync_enabled_secret_version" { - type = any -} - variable "client_proxy_count" { type = number } @@ -461,6 +457,11 @@ variable "dashboard_api_count" { default = 0 } +variable "dashboard_api_env_vars" { + type = map(string) + default = {} +} + variable "volume_token_issuer" { type = string } diff --git a/iac/provider-gcp/variables.tf b/iac/provider-gcp/variables.tf index b8aee2ffaf..f4d8cfbf48 100644 --- a/iac/provider-gcp/variables.tf +++ b/iac/provider-gcp/variables.tf @@ -228,6 +228,11 @@ variable "dashboard_api_count" { default = 0 } +variable "dashboard_api_env_vars" { + type = map(string) + default = {} +} + variable "docker_reverse_proxy_port" { type = object({ name = string From ce0cbd1c8e578e2c8e25538e6dfaabd63c057424 Mon 
Sep 17 00:00:00 2001 From: ben-fornefeld Date: Tue, 31 Mar 2026 16:04:46 -0700 Subject: [PATCH 09/20] refactor(sync): update user sync logic to utilize new database structure - Replaced the previous `Store` implementation with a new structure that integrates both authentication and main database queries. - Updated the `Runner` and `NewRunner` functions to accommodate the new database client structure. - Removed obsolete SQL queries and migration files related to the `user_sync_queue` table. - Enhanced the test suite to reflect changes in the runner's initialization and database interactions. --- .../internal/supabaseauthusersync/runner.go | 16 +++- .../supabaseauthusersync/runner_test.go | 8 +- .../internal/supabaseauthusersync/store.go | 29 ++++--- packages/dashboard-api/main.go | 4 +- ...shboard_supabase_auth_user_sync_queue.sql} | 79 +++++++++++-------- packages/db/{ => pkg/auth}/queries/ack.sql.go | 2 +- .../{ => pkg/auth}/queries/claim_batch.sql.go | 2 +- .../{ => pkg/auth}/queries/dead_letter.sql.go | 2 +- .../auth}/queries/get_auth_user.sql.go | 2 +- .../db/{ => pkg/auth}/queries/retry.sql.go | 2 +- .../supabase_auth_user_sync/ack.sql | 0 .../supabase_auth_user_sync/claim_batch.sql | 0 .../supabase_auth_user_sync/dead_letter.sql | 0 .../supabase_auth_user_sync/get_auth_user.sql | 0 .../supabase_auth_user_sync/retry.sql | 0 packages/db/pkg/testutils/db.go | 19 +++-- packages/db/queries/models.go | 13 --- .../users}/delete_public_user.sql | 0 .../users}/upsert_public_user.sql | 0 packages/db/sqlc.yaml | 1 + 20 files changed, 108 insertions(+), 71 deletions(-) rename packages/db/{migrations/20260328000000_dashboard_supabase_auth_user_sync_queue.sql => pkg/auth/migrations/20260328000001_dashboard_supabase_auth_user_sync_queue.sql} (55%) rename packages/db/{ => pkg/auth}/queries/ack.sql.go (95%) rename packages/db/{ => pkg/auth}/queries/claim_batch.sql.go (98%) rename packages/db/{ => pkg/auth}/queries/dead_letter.sql.go (97%) rename packages/db/{ => 
pkg/auth}/queries/get_auth_user.sql.go (95%) rename packages/db/{ => pkg/auth}/queries/retry.sql.go (97%) rename packages/db/pkg/{dashboard => auth}/sql_queries/supabase_auth_user_sync/ack.sql (100%) rename packages/db/pkg/{dashboard => auth}/sql_queries/supabase_auth_user_sync/claim_batch.sql (100%) rename packages/db/pkg/{dashboard => auth}/sql_queries/supabase_auth_user_sync/dead_letter.sql (100%) rename packages/db/pkg/{dashboard => auth}/sql_queries/supabase_auth_user_sync/get_auth_user.sql (100%) rename packages/db/pkg/{dashboard => auth}/sql_queries/supabase_auth_user_sync/retry.sql (100%) rename packages/db/{pkg/dashboard/sql_queries/supabase_auth_user_sync => queries/users}/delete_public_user.sql (100%) rename packages/db/{pkg/dashboard/sql_queries/supabase_auth_user_sync => queries/users}/upsert_public_user.sql (100%) diff --git a/packages/dashboard-api/internal/supabaseauthusersync/runner.go b/packages/dashboard-api/internal/supabaseauthusersync/runner.go index 1acd35d480..fd7cfa3477 100644 --- a/packages/dashboard-api/internal/supabaseauthusersync/runner.go +++ b/packages/dashboard-api/internal/supabaseauthusersync/runner.go @@ -6,19 +6,31 @@ import ( "go.uber.org/zap" + sqlcdb "github.com/e2b-dev/infra/packages/db/client" + authdb "github.com/e2b-dev/infra/packages/db/pkg/auth" "github.com/e2b-dev/infra/packages/shared/pkg/logger" ) +type runnerStore interface { + ClaimBatch(ctx context.Context, lockOwner string, lockTimeout time.Duration, batchSize int32) ([]QueueItem, error) +} + +type workerStore interface { + runnerStore + processorStore +} + type Runner struct { cfg Config - store *Store + store runnerStore processor *Processor lockOwner string l logger.Logger } -func NewRunner(cfg Config, store *Store, lockOwner string, l logger.Logger) *Runner { +func NewRunner(cfg Config, authDB *authdb.Client, mainDB *sqlcdb.Client, lockOwner string, l logger.Logger) *Runner { workerLogger := l.With(logger.WithServiceInstanceID(lockOwner)) + store := 
NewStore(authDB, mainDB) return &Runner{ cfg: cfg, diff --git a/packages/dashboard-api/internal/supabaseauthusersync/runner_test.go b/packages/dashboard-api/internal/supabaseauthusersync/runner_test.go index 1429c2a605..064d35c77e 100644 --- a/packages/dashboard-api/internal/supabaseauthusersync/runner_test.go +++ b/packages/dashboard-api/internal/supabaseauthusersync/runner_test.go @@ -206,7 +206,13 @@ func startRunnerProcess(t *testing.T, db *testutils.Database, cfg Config, lockOw ctx, cancel := context.WithCancel(context.Background()) done := make(chan error, 1) - runner := NewRunner(cfg, NewStore(db.SqlcClient.Queries), lockOwner, logger.NewNopLogger()) + runner := NewRunner( + cfg, + db.AuthDb, + db.SqlcClient, + lockOwner, + logger.NewNopLogger(), + ) go func() { done <- runner.Run(ctx) diff --git a/packages/dashboard-api/internal/supabaseauthusersync/store.go b/packages/dashboard-api/internal/supabaseauthusersync/store.go index 019e4bf6c6..9eee64ca93 100644 --- a/packages/dashboard-api/internal/supabaseauthusersync/store.go +++ b/packages/dashboard-api/internal/supabaseauthusersync/store.go @@ -7,6 +7,9 @@ import ( "github.com/google/uuid" "github.com/jackc/pgx/v5/pgtype" + sqlcdb "github.com/e2b-dev/infra/packages/db/client" + authdb "github.com/e2b-dev/infra/packages/db/pkg/auth" + authqueries "github.com/e2b-dev/infra/packages/db/pkg/auth/queries" "github.com/e2b-dev/infra/packages/db/queries" ) @@ -24,15 +27,21 @@ type AuthUser struct { } type Store struct { - q *queries.Queries + authQueries *authqueries.Queries + mainQueries *queries.Queries } -func NewStore(q *queries.Queries) *Store { - return &Store{q: q} +var _ workerStore = (*Store)(nil) + +func NewStore(authDB *authdb.Client, mainDB *sqlcdb.Client) *Store { + return &Store{ + authQueries: authDB.Write, + mainQueries: mainDB.Queries, + } } func (s *Store) ClaimBatch(ctx context.Context, lockOwner string, lockTimeout time.Duration, batchSize int32) ([]QueueItem, error) { - rows, err := 
s.q.ClaimUserSyncQueueBatch(ctx, queries.ClaimUserSyncQueueBatchParams{ + rows, err := s.authQueries.ClaimUserSyncQueueBatch(ctx, authqueries.ClaimUserSyncQueueBatchParams{ LockOwner: lockOwner, LockTimeout: durationToInterval(lockTimeout), BatchSize: batchSize, @@ -56,11 +65,11 @@ func (s *Store) ClaimBatch(ctx context.Context, lockOwner string, lockTimeout ti } func (s *Store) Ack(ctx context.Context, id int64) error { - return s.q.AckUserSyncQueueItem(ctx, id) + return s.authQueries.AckUserSyncQueueItem(ctx, id) } func (s *Store) Retry(ctx context.Context, id int64, backoff time.Duration, lastError string) error { - return s.q.RetryUserSyncQueueItem(ctx, queries.RetryUserSyncQueueItemParams{ + return s.authQueries.RetryUserSyncQueueItem(ctx, authqueries.RetryUserSyncQueueItemParams{ ID: id, Backoff: durationToInterval(backoff), LastError: lastError, @@ -68,14 +77,14 @@ func (s *Store) Retry(ctx context.Context, id int64, backoff time.Duration, last } func (s *Store) DeadLetter(ctx context.Context, id int64, lastError string) error { - return s.q.DeadLetterUserSyncQueueItem(ctx, queries.DeadLetterUserSyncQueueItemParams{ + return s.authQueries.DeadLetterUserSyncQueueItem(ctx, authqueries.DeadLetterUserSyncQueueItemParams{ ID: id, LastError: lastError, }) } func (s *Store) GetAuthUser(ctx context.Context, userID uuid.UUID) (*AuthUser, error) { - row, err := s.q.GetAuthUserByID(ctx, userID) + row, err := s.authQueries.GetAuthUserByID(ctx, userID) if err != nil { return nil, err } @@ -84,14 +93,14 @@ func (s *Store) GetAuthUser(ctx context.Context, userID uuid.UUID) (*AuthUser, e } func (s *Store) UpsertPublicUser(ctx context.Context, id uuid.UUID, email string) error { - return s.q.UpsertPublicUser(ctx, queries.UpsertPublicUserParams{ + return s.mainQueries.UpsertPublicUser(ctx, queries.UpsertPublicUserParams{ ID: id, Email: email, }) } func (s *Store) DeletePublicUser(ctx context.Context, id uuid.UUID) error { - return s.q.DeletePublicUser(ctx, id) + return 
s.mainQueries.DeletePublicUser(ctx, id) } func durationToInterval(d time.Duration) pgtype.Interval { diff --git a/packages/dashboard-api/main.go b/packages/dashboard-api/main.go index 22bca86bf3..f1a75eae72 100644 --- a/packages/dashboard-api/main.go +++ b/packages/dashboard-api/main.go @@ -232,12 +232,12 @@ func run() int { if config.SupabaseAuthUserSyncEnabled { workerLogger := l.With(zap.String("worker", "supabase_auth_user_sync")) - syncStore := supabaseauthusersync.NewStore(db.Queries) syncConfig := supabaseauthusersync.DefaultConfig() syncConfig.Enabled = true syncRunner := supabaseauthusersync.NewRunner( syncConfig, - syncStore, + authDB, + db, serviceInstanceID, workerLogger, ) diff --git a/packages/db/migrations/20260328000000_dashboard_supabase_auth_user_sync_queue.sql b/packages/db/pkg/auth/migrations/20260328000001_dashboard_supabase_auth_user_sync_queue.sql similarity index 55% rename from packages/db/migrations/20260328000000_dashboard_supabase_auth_user_sync_queue.sql rename to packages/db/pkg/auth/migrations/20260328000001_dashboard_supabase_auth_user_sync_queue.sql index a250681905..e300948803 100644 --- a/packages/db/migrations/20260328000000_dashboard_supabase_auth_user_sync_queue.sql +++ b/packages/db/pkg/auth/migrations/20260328000001_dashboard_supabase_auth_user_sync_queue.sql @@ -1,16 +1,15 @@ -- +goose Up -- +goose StatementBegin - CREATE TABLE public.user_sync_queue ( - id BIGSERIAL PRIMARY KEY, - user_id UUID NOT NULL, - operation TEXT NOT NULL CHECK (operation IN ('upsert', 'delete')), - created_at TIMESTAMPTZ NOT NULL DEFAULT now(), - next_attempt_at TIMESTAMPTZ NOT NULL DEFAULT now(), - locked_at TIMESTAMPTZ NULL, - lock_owner TEXT NULL, - attempt_count INT NOT NULL DEFAULT 0, - last_error TEXT NULL, + id BIGSERIAL PRIMARY KEY, + user_id UUID NOT NULL, + operation TEXT NOT NULL CHECK (operation IN ('upsert', 'delete')), + created_at TIMESTAMPTZ NOT NULL DEFAULT now(), + next_attempt_at TIMESTAMPTZ NOT NULL DEFAULT now(), + locked_at 
TIMESTAMPTZ NULL, + lock_owner TEXT NULL, + attempt_count INT NOT NULL DEFAULT 0, + last_error TEXT NULL, dead_lettered_at TIMESTAMPTZ NULL ); @@ -33,14 +32,10 @@ CREATE POLICY "Allow to create a user sync queue item" TO trigger_user WITH CHECK (TRUE); --- Keep direct insert-sync and also enqueue CREATE OR REPLACE FUNCTION public.sync_insert_auth_users_to_public_users_trigger() RETURNS TRIGGER LANGUAGE plpgsql AS $func$ BEGIN - INSERT INTO public.users (id, email) - VALUES (NEW.id, NEW.email); - INSERT INTO public.user_sync_queue (user_id, operation) VALUES (NEW.id, 'upsert'); @@ -48,20 +43,10 @@ BEGIN END; $func$ SECURITY DEFINER SET search_path = public; --- Keep direct update-sync and also enqueue when mirrored fields change CREATE OR REPLACE FUNCTION public.sync_update_auth_users_to_public_users_trigger() RETURNS TRIGGER LANGUAGE plpgsql AS $func$ BEGIN - UPDATE public.users - SET email = NEW.email, - updated_at = now() - WHERE id = NEW.id; - - IF NOT FOUND THEN - RAISE EXCEPTION 'User with id % does not exist in public.users', NEW.id; - END IF; - IF OLD.email IS DISTINCT FROM NEW.email THEN INSERT INTO public.user_sync_queue (user_id, operation) VALUES (NEW.id, 'upsert'); @@ -71,13 +56,10 @@ BEGIN END; $func$ SECURITY DEFINER SET search_path = public; --- Keep direct delete-sync and also enqueue CREATE OR REPLACE FUNCTION public.sync_delete_auth_users_to_public_users_trigger() RETURNS TRIGGER LANGUAGE plpgsql AS $func$ BEGIN - DELETE FROM public.users WHERE id = OLD.id; - INSERT INTO public.user_sync_queue (user_id, operation) VALUES (OLD.id, 'delete'); @@ -85,23 +67,43 @@ BEGIN END; $func$ SECURITY DEFINER SET search_path = public; +ALTER FUNCTION public.sync_insert_auth_users_to_public_users_trigger() OWNER TO trigger_user; +ALTER FUNCTION public.sync_update_auth_users_to_public_users_trigger() OWNER TO trigger_user; +ALTER FUNCTION public.sync_delete_auth_users_to_public_users_trigger() OWNER TO trigger_user; + +DROP TRIGGER IF EXISTS 
sync_inserts_to_public_users ON auth.users; +CREATE TRIGGER sync_inserts_to_public_users + AFTER INSERT ON auth.users + FOR EACH ROW EXECUTE FUNCTION public.sync_insert_auth_users_to_public_users_trigger(); + +DROP TRIGGER IF EXISTS sync_updates_to_public_users ON auth.users; +CREATE TRIGGER sync_updates_to_public_users + AFTER UPDATE ON auth.users + FOR EACH ROW EXECUTE FUNCTION public.sync_update_auth_users_to_public_users_trigger(); + +DROP TRIGGER IF EXISTS sync_deletes_to_public_users ON auth.users; +CREATE TRIGGER sync_deletes_to_public_users + AFTER DELETE ON auth.users + FOR EACH ROW EXECUTE FUNCTION public.sync_delete_auth_users_to_public_users_trigger(); -- +goose StatementEnd -- +goose Down -- +goose StatementBegin +DROP TRIGGER IF EXISTS sync_inserts_to_public_users ON auth.users; +DROP TRIGGER IF EXISTS sync_updates_to_public_users ON auth.users; +DROP TRIGGER IF EXISTS sync_deletes_to_public_users ON auth.users; --- Restore direct insert-sync CREATE OR REPLACE FUNCTION public.sync_insert_auth_users_to_public_users_trigger() RETURNS TRIGGER LANGUAGE plpgsql AS $func$ BEGIN INSERT INTO public.users (id, email) VALUES (NEW.id, NEW.email); + RETURN NEW; END; $func$ SECURITY DEFINER SET search_path = public; --- Restore direct update-sync CREATE OR REPLACE FUNCTION public.sync_update_auth_users_to_public_users_trigger() RETURNS TRIGGER LANGUAGE plpgsql AS $func$ @@ -119,21 +121,36 @@ BEGIN END; $func$ SECURITY DEFINER SET search_path = public; --- Restore direct delete-sync CREATE OR REPLACE FUNCTION public.sync_delete_auth_users_to_public_users_trigger() RETURNS TRIGGER LANGUAGE plpgsql AS $func$ BEGIN DELETE FROM public.users WHERE id = OLD.id; + RETURN OLD; END; $func$ SECURITY DEFINER SET search_path = public; +ALTER FUNCTION public.sync_insert_auth_users_to_public_users_trigger() OWNER TO trigger_user; +ALTER FUNCTION public.sync_update_auth_users_to_public_users_trigger() OWNER TO trigger_user; +ALTER FUNCTION 
public.sync_delete_auth_users_to_public_users_trigger() OWNER TO trigger_user; + +CREATE TRIGGER sync_inserts_to_public_users + AFTER INSERT ON auth.users + FOR EACH ROW EXECUTE FUNCTION public.sync_insert_auth_users_to_public_users_trigger(); + +CREATE TRIGGER sync_updates_to_public_users + AFTER UPDATE ON auth.users + FOR EACH ROW EXECUTE FUNCTION public.sync_update_auth_users_to_public_users_trigger(); + +CREATE TRIGGER sync_deletes_to_public_users + AFTER DELETE ON auth.users + FOR EACH ROW EXECUTE FUNCTION public.sync_delete_auth_users_to_public_users_trigger(); + REVOKE INSERT ON public.user_sync_queue FROM trigger_user; REVOKE USAGE, SELECT ON SEQUENCE public.user_sync_queue_id_seq FROM trigger_user; DROP POLICY IF EXISTS "Allow to create a user sync queue item" ON public.user_sync_queue; DROP TABLE public.user_sync_queue; - -- +goose StatementEnd diff --git a/packages/db/queries/ack.sql.go b/packages/db/pkg/auth/queries/ack.sql.go similarity index 95% rename from packages/db/queries/ack.sql.go rename to packages/db/pkg/auth/queries/ack.sql.go index b55274a6c9..2102f263db 100644 --- a/packages/db/queries/ack.sql.go +++ b/packages/db/pkg/auth/queries/ack.sql.go @@ -3,7 +3,7 @@ // sqlc v1.29.0 // source: ack.sql -package queries +package authqueries import ( "context" diff --git a/packages/db/queries/claim_batch.sql.go b/packages/db/pkg/auth/queries/claim_batch.sql.go similarity index 98% rename from packages/db/queries/claim_batch.sql.go rename to packages/db/pkg/auth/queries/claim_batch.sql.go index b36cfb0a39..6c1555549f 100644 --- a/packages/db/queries/claim_batch.sql.go +++ b/packages/db/pkg/auth/queries/claim_batch.sql.go @@ -3,7 +3,7 @@ // sqlc v1.29.0 // source: claim_batch.sql -package queries +package authqueries import ( "context" diff --git a/packages/db/queries/dead_letter.sql.go b/packages/db/pkg/auth/queries/dead_letter.sql.go similarity index 97% rename from packages/db/queries/dead_letter.sql.go rename to 
packages/db/pkg/auth/queries/dead_letter.sql.go index b9b7941d8a..de2bcb3e80 100644 --- a/packages/db/queries/dead_letter.sql.go +++ b/packages/db/pkg/auth/queries/dead_letter.sql.go @@ -3,7 +3,7 @@ // sqlc v1.29.0 // source: dead_letter.sql -package queries +package authqueries import ( "context" diff --git a/packages/db/queries/get_auth_user.sql.go b/packages/db/pkg/auth/queries/get_auth_user.sql.go similarity index 95% rename from packages/db/queries/get_auth_user.sql.go rename to packages/db/pkg/auth/queries/get_auth_user.sql.go index 4b7c341df2..4c34da8bdf 100644 --- a/packages/db/queries/get_auth_user.sql.go +++ b/packages/db/pkg/auth/queries/get_auth_user.sql.go @@ -3,7 +3,7 @@ // sqlc v1.29.0 // source: get_auth_user.sql -package queries +package authqueries import ( "context" diff --git a/packages/db/queries/retry.sql.go b/packages/db/pkg/auth/queries/retry.sql.go similarity index 97% rename from packages/db/queries/retry.sql.go rename to packages/db/pkg/auth/queries/retry.sql.go index 941c96ae18..297fbd7c76 100644 --- a/packages/db/queries/retry.sql.go +++ b/packages/db/pkg/auth/queries/retry.sql.go @@ -3,7 +3,7 @@ // sqlc v1.29.0 // source: retry.sql -package queries +package authqueries import ( "context" diff --git a/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/ack.sql b/packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/ack.sql similarity index 100% rename from packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/ack.sql rename to packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/ack.sql diff --git a/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/claim_batch.sql b/packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/claim_batch.sql similarity index 100% rename from packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/claim_batch.sql rename to packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/claim_batch.sql diff --git 
a/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/dead_letter.sql b/packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/dead_letter.sql similarity index 100% rename from packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/dead_letter.sql rename to packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/dead_letter.sql diff --git a/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/get_auth_user.sql b/packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/get_auth_user.sql similarity index 100% rename from packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/get_auth_user.sql rename to packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/get_auth_user.sql diff --git a/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/retry.sql b/packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/retry.sql similarity index 100% rename from packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/retry.sql rename to packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/retry.sql diff --git a/packages/db/pkg/testutils/db.go b/packages/db/pkg/testutils/db.go index ecdf856488..86708c18c4 100644 --- a/packages/db/pkg/testutils/db.go +++ b/packages/db/pkg/testutils/db.go @@ -123,12 +123,17 @@ func runDatabaseMigrations(t *testing.T, connStr string) { }) // run the db migration - err = goose.RunWithOptionsContext( - t.Context(), - "up", - db, + for _, migrationsDir := range []string{ filepath.Join(repoRoot, "packages", "db", "migrations"), - nil, - ) - require.NoError(t, err) + filepath.Join(repoRoot, "packages", "db", "pkg", "auth", "migrations"), + } { + err = goose.RunWithOptionsContext( + t.Context(), + "up", + db, + migrationsDir, + nil, + ) + require.NoError(t, err) + } } diff --git a/packages/db/queries/models.go b/packages/db/queries/models.go index f3ec4451c9..6c960a7a59 100644 --- a/packages/db/queries/models.go +++ b/packages/db/queries/models.go @@ -226,19 +226,6 @@ type User struct { Email string 
} -type UserSyncQueue struct { - ID int64 - UserID uuid.UUID - Operation string - CreatedAt time.Time - NextAttemptAt time.Time - LockedAt *time.Time - LockOwner *string - AttemptCount int32 - LastError *string - DeadLetteredAt *time.Time -} - type UsersTeam struct { ID int64 UserID uuid.UUID diff --git a/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/delete_public_user.sql b/packages/db/queries/users/delete_public_user.sql similarity index 100% rename from packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/delete_public_user.sql rename to packages/db/queries/users/delete_public_user.sql diff --git a/packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/upsert_public_user.sql b/packages/db/queries/users/upsert_public_user.sql similarity index 100% rename from packages/db/pkg/dashboard/sql_queries/supabase_auth_user_sync/upsert_public_user.sql rename to packages/db/queries/users/upsert_public_user.sql diff --git a/packages/db/sqlc.yaml b/packages/db/sqlc.yaml index 206c468907..4959602d19 100644 --- a/packages/db/sqlc.yaml +++ b/packages/db/sqlc.yaml @@ -63,6 +63,7 @@ sql: schema: - "migrations" - "schema" + - "pkg/auth/migrations" gen: go: emit_pointers_for_null_types: true From 4d622ceb0e262cb16e0fc394dde7b0f48ccdc41a Mon Sep 17 00:00:00 2001 From: ben-fornefeld Date: Tue, 31 Mar 2026 16:11:56 -0700 Subject: [PATCH 10/20] fix: lint --- .../internal/supabaseauthusersync/logging.go | 4 +- .../supabaseauthusersync/processor.go | 2 +- .../supabaseauthusersync/processor_test.go | 11 +- .../internal/supabaseauthusersync/runner.go | 2 +- .../supabaseauthusersync/runner_test.go | 244 +++++++++--------- .../supabaseauthusersync/supervisor.go | 11 +- .../supabaseauthusersync/supervisor_test.go | 6 + 7 files changed, 153 insertions(+), 127 deletions(-) diff --git a/packages/dashboard-api/internal/supabaseauthusersync/logging.go b/packages/dashboard-api/internal/supabaseauthusersync/logging.go index c60d02a0c5..b5730330f4 100644 --- 
a/packages/dashboard-api/internal/supabaseauthusersync/logging.go +++ b/packages/dashboard-api/internal/supabaseauthusersync/logging.go @@ -107,7 +107,7 @@ func (s *batchSummary) Add(result processResult) { } } -func (s batchSummary) Fields(totalDuration time.Duration) []zap.Field { +func (s *batchSummary) Fields(totalDuration time.Duration) []zap.Field { fields := []zap.Field{ zap.Int("queue_batch.claimed_count", s.ClaimedCount), zap.Int("queue_batch.acked_count", s.AckedCount), @@ -139,7 +139,7 @@ func (s batchSummary) Fields(totalDuration time.Duration) []zap.Field { return fields } -func (s batchSummary) Level() zapcore.Level { +func (s *batchSummary) Level() zapcore.Level { if s.AckFailedCount > 0 || s.RetryFailedCount > 0 || s.DeadLetteredCount > 0 || s.DeadLetterFailedCount > 0 { return zap.ErrorLevel } diff --git a/packages/dashboard-api/internal/supabaseauthusersync/processor.go b/packages/dashboard-api/internal/supabaseauthusersync/processor.go index 1d562c8288..e25d2a0c6c 100644 --- a/packages/dashboard-api/internal/supabaseauthusersync/processor.go +++ b/packages/dashboard-api/internal/supabaseauthusersync/processor.go @@ -37,7 +37,7 @@ func NewProcessor(store processorStore, maxAttempts int32, l logger.Logger) *Pro } } -func (p *Processor) Process(ctx context.Context, item QueueItem) processResult { +func (p *Processor) process(ctx context.Context, item QueueItem) processResult { startedAt := time.Now() action, err := p.processOnce(ctx, item) result := processResult{ diff --git a/packages/dashboard-api/internal/supabaseauthusersync/processor_test.go b/packages/dashboard-api/internal/supabaseauthusersync/processor_test.go index 23072901fa..125222d5ee 100644 --- a/packages/dashboard-api/internal/supabaseauthusersync/processor_test.go +++ b/packages/dashboard-api/internal/supabaseauthusersync/processor_test.go @@ -32,6 +32,7 @@ type fakeProcessorStore struct { func (s *fakeProcessorStore) Ack(_ context.Context, id int64) error { s.ackCalls = 
append(s.ackCalls, id) + return nil } @@ -41,6 +42,7 @@ func (s *fakeProcessorStore) Retry(_ context.Context, id int64, backoff time.Dur backoff: backoff, lastError: lastError, }) + return nil } @@ -49,6 +51,7 @@ func (s *fakeProcessorStore) DeadLetter(_ context.Context, id int64, lastError s id: id, lastError: lastError, }) + return nil } @@ -65,6 +68,8 @@ func (s *fakeProcessorStore) DeletePublicUser(_ context.Context, _ uuid.UUID) er } func TestProcessorProcessRetriesRecoveredPanic(t *testing.T) { + t.Parallel() + store := &fakeProcessorStore{ getAuthUserFn: func(context.Context, uuid.UUID) (*AuthUser, error) { panic("boom") @@ -78,7 +83,7 @@ func TestProcessorProcessRetriesRecoveredPanic(t *testing.T) { } require.NotPanics(t, func() { - processor.Process(context.Background(), item) + processor.process(context.Background(), item) }) require.Empty(t, store.ackCalls) require.Len(t, store.retryCalls, 1) @@ -87,6 +92,8 @@ func TestProcessorProcessRetriesRecoveredPanic(t *testing.T) { } func TestProcessorProcessDeadLettersRecoveredPanicAtMaxAttempts(t *testing.T) { + t.Parallel() + store := &fakeProcessorStore{ getAuthUserFn: func(context.Context, uuid.UUID) (*AuthUser, error) { panic("boom") @@ -100,7 +107,7 @@ func TestProcessorProcessDeadLettersRecoveredPanicAtMaxAttempts(t *testing.T) { } require.NotPanics(t, func() { - processor.Process(context.Background(), item) + processor.process(context.Background(), item) }) require.Empty(t, store.ackCalls) require.Empty(t, store.retryCalls) diff --git a/packages/dashboard-api/internal/supabaseauthusersync/runner.go b/packages/dashboard-api/internal/supabaseauthusersync/runner.go index fd7cfa3477..174c976a84 100644 --- a/packages/dashboard-api/internal/supabaseauthusersync/runner.go +++ b/packages/dashboard-api/internal/supabaseauthusersync/runner.go @@ -86,7 +86,7 @@ func (r *Runner) poll(ctx context.Context) { summary := newBatchSummary(items, claimedAt) for _, item := range items { - summary.Add(r.processor.Process(ctx, 
item)) + summary.Add(r.processor.process(ctx, item)) } r.l.Log(ctx, summary.Level(), "processed supabase auth sync queue batch", summary.Fields(time.Since(claimedAt))...) diff --git a/packages/dashboard-api/internal/supabaseauthusersync/runner_test.go b/packages/dashboard-api/internal/supabaseauthusersync/runner_test.go index 064d35c77e..a99e4ad3b2 100644 --- a/packages/dashboard-api/internal/supabaseauthusersync/runner_test.go +++ b/packages/dashboard-api/internal/supabaseauthusersync/runner_test.go @@ -41,153 +41,165 @@ type queueSnapshot struct { } func TestSupabaseAuthUserSyncRunner_EndToEnd(t *testing.T) { + t.Parallel() + db := testutils.SetupDatabase(t) - t.Run("repairs_insert_update_delete_drift", func(t *testing.T) { - ctx := t.Context() - userID := uuid.New() - initialEmail := fmt.Sprintf("auth-sync-%s-initial@example.com", userID.String()[:8]) - updatedEmail := fmt.Sprintf("auth-sync-%s-updated@example.com", userID.String()[:8]) + runRepairsInsertUpdateDeleteDrift(t, db) + runReclaimsStaleQueueLocks(t, db) + runDrainsBurstBacklogWithMultipleRunners(t, db) +} - insertAuthUser(t, ctx, db, userID, initialEmail) - deletePublicUser(t, ctx, db, userID) - assertQueueBacklog(t, ctx, db, 1) - - insertRunner := startRunnerProcess(t, db, newTestRunnerConfig(4), "repair-insert") - t.Cleanup(func() { - insertRunner.Stop(t) - }) - waitForPublicUsers(t, ctx, db, map[uuid.UUID]userExpectation{ - userID: { - Email: initialEmail, - Exists: true, - }, - }) - waitForQueueDrain(t, ctx, db) +func runRepairsInsertUpdateDeleteDrift(t *testing.T, db *testutils.Database) { + t.Helper() + + ctx := t.Context() + userID := uuid.New() + initialEmail := fmt.Sprintf("auth-sync-%s-initial@example.com", userID.String()[:8]) + updatedEmail := fmt.Sprintf("auth-sync-%s-updated@example.com", userID.String()[:8]) + + insertAuthUser(t, ctx, db, userID, initialEmail) + deletePublicUser(t, ctx, db, userID) + assertQueueBacklog(t, ctx, db, 1) + + insertRunner := startRunnerProcess(t, db, 
newTestRunnerConfig(4), "repair-insert") + t.Cleanup(func() { insertRunner.Stop(t) + }) + waitForPublicUsers(t, ctx, db, map[uuid.UUID]userExpectation{ + userID: { + Email: initialEmail, + Exists: true, + }, + }) + waitForQueueDrain(t, ctx, db) + insertRunner.Stop(t) + + updateAuthUserEmail(t, ctx, db, userID, updatedEmail) + setPublicUserEmail(t, ctx, db, userID, "stale@example.com") + assertQueueBacklog(t, ctx, db, 1) - updateAuthUserEmail(t, ctx, db, userID, updatedEmail) - setPublicUserEmail(t, ctx, db, userID, "stale@example.com") - assertQueueBacklog(t, ctx, db, 1) - - updateRunner := startRunnerProcess(t, db, newTestRunnerConfig(4), "repair-update") - t.Cleanup(func() { - updateRunner.Stop(t) - }) - waitForPublicUsers(t, ctx, db, map[uuid.UUID]userExpectation{ - userID: { - Email: updatedEmail, - Exists: true, - }, - }) - waitForQueueDrain(t, ctx, db) + updateRunner := startRunnerProcess(t, db, newTestRunnerConfig(4), "repair-update") + t.Cleanup(func() { updateRunner.Stop(t) + }) + waitForPublicUsers(t, ctx, db, map[uuid.UUID]userExpectation{ + userID: { + Email: updatedEmail, + Exists: true, + }, + }) + waitForQueueDrain(t, ctx, db) + updateRunner.Stop(t) - deleteAuthUser(t, ctx, db, userID) - insertPublicUser(t, ctx, db, userID, "ghost@example.com") - assertQueueBacklog(t, ctx, db, 1) - - deleteRunner := startRunnerProcess(t, db, newTestRunnerConfig(4), "repair-delete") - t.Cleanup(func() { - deleteRunner.Stop(t) - }) - waitForPublicUsers(t, ctx, db, map[uuid.UUID]userExpectation{ - userID: { - Exists: false, - }, - }) - waitForQueueDrain(t, ctx, db) + deleteAuthUser(t, ctx, db, userID) + insertPublicUser(t, ctx, db, userID, "ghost@example.com") + assertQueueBacklog(t, ctx, db, 1) + + deleteRunner := startRunnerProcess(t, db, newTestRunnerConfig(4), "repair-delete") + t.Cleanup(func() { deleteRunner.Stop(t) }) + waitForPublicUsers(t, ctx, db, map[uuid.UUID]userExpectation{ + userID: { + Exists: false, + }, + }) + waitForQueueDrain(t, ctx, db) + 
deleteRunner.Stop(t) +} - t.Run("reclaims_stale_queue_locks", func(t *testing.T) { - ctx := t.Context() - userID := uuid.New() - email := fmt.Sprintf("auth-sync-%s-locked@example.com", userID.String()[:8]) - - insertAuthUser(t, ctx, db, userID, email) - deletePublicUser(t, ctx, db, userID) - lockQueueItems(t, ctx, db, userID, time.Now().Add(-time.Minute), "stale-worker") - assertQueueBacklog(t, ctx, db, 1) - - runner := startRunnerProcess(t, db, newTestRunnerConfig(2), "lock-reclaimer") - t.Cleanup(func() { - runner.Stop(t) - }) - - waitForPublicUsers(t, ctx, db, map[uuid.UUID]userExpectation{ - userID: { - Email: email, - Exists: true, - }, - }) - waitForQueueDrain(t, ctx, db) +func runReclaimsStaleQueueLocks(t *testing.T, db *testutils.Database) { + t.Helper() + + ctx := t.Context() + userID := uuid.New() + email := fmt.Sprintf("auth-sync-%s-locked@example.com", userID.String()[:8]) + + insertAuthUser(t, ctx, db, userID, email) + deletePublicUser(t, ctx, db, userID) + lockQueueItems(t, ctx, db, userID, time.Now().Add(-time.Minute), "stale-worker") + assertQueueBacklog(t, ctx, db, 1) + + runner := startRunnerProcess(t, db, newTestRunnerConfig(2), "lock-reclaimer") + t.Cleanup(func() { runner.Stop(t) }) - t.Run("drains_burst_backlog_with_multiple_runners", func(t *testing.T) { - ctx := t.Context() - const userCount = 60 + waitForPublicUsers(t, ctx, db, map[uuid.UUID]userExpectation{ + userID: { + Email: email, + Exists: true, + }, + }) + waitForQueueDrain(t, ctx, db) + runner.Stop(t) +} - userIDs := make([]uuid.UUID, 0, userCount) +func runDrainsBurstBacklogWithMultipleRunners(t *testing.T, db *testutils.Database) { + t.Helper() - for i := 0; i < userCount; i++ { - userID := uuid.New() - userIDs = append(userIDs, userID) + ctx := t.Context() + const userCount = 60 - initialEmail := fmt.Sprintf("auth-sync-burst-%02d-initial@example.com", i) - insertAuthUser(t, ctx, db, userID, initialEmail) + userIDs := make([]uuid.UUID, 0, userCount) - if i%2 == 0 { - 
updateAuthUserEmail(t, ctx, db, userID, fmt.Sprintf("auth-sync-burst-%02d-v2@example.com", i)) - } - if i%5 == 0 { - updateAuthUserEmail(t, ctx, db, userID, fmt.Sprintf("auth-sync-burst-%02d-v3@example.com", i)) - } + for i := range userCount { + userID := uuid.New() + userIDs = append(userIDs, userID) - if i%3 == 0 { - deleteAuthUser(t, ctx, db, userID) - enqueueUserSyncItem(t, ctx, db, userID, "delete") - if i%6 == 0 { - insertPublicUser(t, ctx, db, userID, fmt.Sprintf("ghost-%02d@example.com", i)) - } + initialEmail := fmt.Sprintf("auth-sync-burst-%02d-initial@example.com", i) + insertAuthUser(t, ctx, db, userID, initialEmail) - continue - } + if i%2 == 0 { + updateAuthUserEmail(t, ctx, db, userID, fmt.Sprintf("auth-sync-burst-%02d-v2@example.com", i)) + } + if i%5 == 0 { + updateAuthUserEmail(t, ctx, db, userID, fmt.Sprintf("auth-sync-burst-%02d-v3@example.com", i)) + } - if i%8 == 0 { - deletePublicUser(t, ctx, db, userID) - } else if i%7 == 0 { - setPublicUserEmail(t, ctx, db, userID, fmt.Sprintf("stale-%02d@example.com", i)) + if i%3 == 0 { + deleteAuthUser(t, ctx, db, userID) + enqueueUserSyncItem(t, ctx, db, userID, "delete") + if i%6 == 0 { + insertPublicUser(t, ctx, db, userID, fmt.Sprintf("ghost-%02d@example.com", i)) } - if i%4 == 0 { - enqueueUserSyncItem(t, ctx, db, userID, "upsert") - } - if i%9 == 0 { - enqueueUserSyncItem(t, ctx, db, userID, "upsert") - } + continue } - authUsers, err := loadAuthUsers(ctx, db) - require.NoError(t, err) + if i%8 == 0 { + deletePublicUser(t, ctx, db, userID) + } else if i%7 == 0 { + setPublicUserEmail(t, ctx, db, userID, fmt.Sprintf("stale-%02d@example.com", i)) + } - want := expectedUsersForIDs(userIDs, authUsers) - assertQueueBacklog(t, ctx, db, userCount) + if i%4 == 0 { + enqueueUserSyncItem(t, ctx, db, userID, "upsert") + } + if i%9 == 0 { + enqueueUserSyncItem(t, ctx, db, userID, "upsert") + } + } - runnerA := startRunnerProcess(t, db, newTestRunnerConfig(5), "burst-a") - runnerB := startRunnerProcess(t, db, 
newTestRunnerConfig(5), "burst-b") - t.Cleanup(func() { - runnerA.Stop(t) - runnerB.Stop(t) - }) + authUsers, err := loadAuthUsers(ctx, db) + require.NoError(t, err) - waitForPublicUsers(t, ctx, db, want) - waitForQueueDrain(t, ctx, db) + want := expectedUsersForIDs(userIDs, authUsers) + assertQueueBacklog(t, ctx, db, userCount) + runnerA := startRunnerProcess(t, db, newTestRunnerConfig(5), "burst-a") + runnerB := startRunnerProcess(t, db, newTestRunnerConfig(5), "burst-b") + t.Cleanup(func() { runnerA.Stop(t) runnerB.Stop(t) }) + + waitForPublicUsers(t, ctx, db, want) + waitForQueueDrain(t, ctx, db) + + runnerA.Stop(t) + runnerB.Stop(t) } func newTestRunnerConfig(batchSize int32) Config { diff --git a/packages/dashboard-api/internal/supabaseauthusersync/supervisor.go b/packages/dashboard-api/internal/supabaseauthusersync/supervisor.go index 24269f351e..de60dda43a 100644 --- a/packages/dashboard-api/internal/supabaseauthusersync/supervisor.go +++ b/packages/dashboard-api/internal/supabaseauthusersync/supervisor.go @@ -69,6 +69,7 @@ func supervise(ctx context.Context, l logger.Logger, cfg supervisorConfig, run f select { case <-ctx.Done(): timer.Stop() + return ctx.Err() case <-timer.C: } @@ -95,18 +96,18 @@ func runRecovering(ctx context.Context, l logger.Logger, run func(context.Contex return err } -func restartBackoff(attempt int, base time.Duration, max time.Duration) time.Duration { +func restartBackoff(attempt int, base time.Duration, maxDelay time.Duration) time.Duration { if base <= 0 { base = defaultRestartDelay } - if max < base { - max = base + if maxDelay < base { + maxDelay = base } delay := base for i := 1; i < attempt; i++ { - if delay >= max/2 { - return max + if delay >= maxDelay/2 { + return maxDelay } delay *= 2 diff --git a/packages/dashboard-api/internal/supabaseauthusersync/supervisor_test.go b/packages/dashboard-api/internal/supabaseauthusersync/supervisor_test.go index 3e26d71a71..fc53f77ba4 100644 --- 
a/packages/dashboard-api/internal/supabaseauthusersync/supervisor_test.go +++ b/packages/dashboard-api/internal/supabaseauthusersync/supervisor_test.go @@ -13,6 +13,8 @@ import ( ) func TestSuperviseRestartsAfterUnexpectedError(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -32,6 +34,7 @@ func TestSuperviseRestartsAfterUnexpectedError(t *testing.T) { cancel() <-ctx.Done() + return ctx.Err() }) }() @@ -42,6 +45,8 @@ func TestSuperviseRestartsAfterUnexpectedError(t *testing.T) { } func TestSuperviseRestartsAfterPanic(t *testing.T) { + t.Parallel() + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -61,6 +66,7 @@ func TestSuperviseRestartsAfterPanic(t *testing.T) { cancel() <-ctx.Done() + return ctx.Err() }) }() From 84ecd065e44439010822b5d49482178e65f54f1c Mon Sep 17 00:00:00 2001 From: ben-fornefeld Date: Tue, 31 Mar 2026 16:52:27 -0700 Subject: [PATCH 11/20] test: apply database migrations in end-to-end test setup - Updated the `TestSupabaseAuthUserSyncRunner_EndToEnd` to apply necessary database migrations before running tests. - Refactored the `SetupDatabase` function to include a new method `ApplyMigrations` for better migration management. 
--- .../supabaseauthusersync/runner_test.go | 1 + packages/db/pkg/testutils/db.go | 27 +++++++++++-------- 2 files changed, 17 insertions(+), 11 deletions(-) diff --git a/packages/dashboard-api/internal/supabaseauthusersync/runner_test.go b/packages/dashboard-api/internal/supabaseauthusersync/runner_test.go index a99e4ad3b2..25d2327af1 100644 --- a/packages/dashboard-api/internal/supabaseauthusersync/runner_test.go +++ b/packages/dashboard-api/internal/supabaseauthusersync/runner_test.go @@ -44,6 +44,7 @@ func TestSupabaseAuthUserSyncRunner_EndToEnd(t *testing.T) { t.Parallel() db := testutils.SetupDatabase(t) + db.ApplyMigrations(t, "packages/db/pkg/auth/migrations") runRepairsInsertUpdateDeleteDrift(t, db) runReclaimsStaleQueueLocks(t, db) diff --git a/packages/db/pkg/testutils/db.go b/packages/db/pkg/testutils/db.go index 86708c18c4..68fa814977 100644 --- a/packages/db/pkg/testutils/db.go +++ b/packages/db/pkg/testutils/db.go @@ -38,6 +38,7 @@ type Database struct { SqlcClient *db.Client AuthDb *authdb.Client TestQueries *queries.Queries + connStr string } // SetupDatabase creates a fresh PostgreSQL container with migrations applied @@ -103,11 +104,11 @@ func SetupDatabase(t *testing.T) *Database { SqlcClient: sqlcClient, AuthDb: authDb, TestQueries: testQueries, + connStr: connStr, } } -// runDatabaseMigrations executes all required database migrations -func runDatabaseMigrations(t *testing.T, connStr string) { +func (db *Database) ApplyMigrations(t *testing.T, migrationDirs ...string) { t.Helper() cmd := exec.CommandContext(t.Context(), "git", "rev-parse", "--show-toplevel") @@ -115,25 +116,29 @@ func runDatabaseMigrations(t *testing.T, connStr string) { require.NoError(t, err, "Failed to find git root") repoRoot := strings.TrimSpace(string(output)) - db, err := goose.OpenDBWithDriver("pgx", connStr) + sqlDB, err := goose.OpenDBWithDriver("pgx", db.connStr) require.NoError(t, err) t.Cleanup(func() { - err := db.Close() + err := sqlDB.Close() assert.NoError(t, 
err) }) - // run the db migration - for _, migrationsDir := range []string{ - filepath.Join(repoRoot, "packages", "db", "migrations"), - filepath.Join(repoRoot, "packages", "db", "pkg", "auth", "migrations"), - } { + for _, migrationsDir := range migrationDirs { err = goose.RunWithOptionsContext( t.Context(), "up", - db, - migrationsDir, + sqlDB, + filepath.Join(repoRoot, migrationsDir), nil, ) require.NoError(t, err) } } + +// runDatabaseMigrations executes all required database migrations +func runDatabaseMigrations(t *testing.T, connStr string) { + t.Helper() + + db := &Database{connStr: connStr} + db.ApplyMigrations(t, filepath.Join("packages", "db", "migrations")) +} From 386d0c692dd30df5af8f621e16222e6b7cec196a Mon Sep 17 00:00:00 2001 From: ben-fornefeld Date: Tue, 31 Mar 2026 18:08:18 -0700 Subject: [PATCH 12/20] feat(sync): enhance user sync processing and acknowledgment - Introduced a new process outcome `ready_to_ack` to streamline acknowledgment handling. - Refactored the `process` method to prepare for batch acknowledgment of processed items. - Added a new `AckBatch` method in the store to handle multiple acknowledgments efficiently. - Updated the `Runner` to process items in batches and finalize acknowledgments accordingly. - Removed obsolete SQL query for single item acknowledgment as part of the refactor. - Enhanced tests to cover new deletion logic and acknowledgment scenarios. 
--- .../internal/supabaseauthusersync/logging.go | 1 + .../supabaseauthusersync/processor.go | 25 ++---- .../supabaseauthusersync/processor_test.go | 43 ++++++--- .../internal/supabaseauthusersync/runner.go | 87 +++++++++++++++++-- .../internal/supabaseauthusersync/store.go | 8 +- packages/db/pkg/auth/queries/ack_batch.sql.go | 20 +++++ .../supabase_auth_user_sync/ack.sql | 3 - .../supabase_auth_user_sync/ack_batch.sql | 3 + 8 files changed, 149 insertions(+), 41 deletions(-) create mode 100644 packages/db/pkg/auth/queries/ack_batch.sql.go delete mode 100644 packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/ack.sql create mode 100644 packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/ack_batch.sql diff --git a/packages/dashboard-api/internal/supabaseauthusersync/logging.go b/packages/dashboard-api/internal/supabaseauthusersync/logging.go index b5730330f4..06759556e6 100644 --- a/packages/dashboard-api/internal/supabaseauthusersync/logging.go +++ b/packages/dashboard-api/internal/supabaseauthusersync/logging.go @@ -13,6 +13,7 @@ import ( type processOutcome string const ( + processOutcomeReadyToAck processOutcome = "ready_to_ack" processOutcomeAcked processOutcome = "acked" processOutcomeAckFailed processOutcome = "ack_failed" processOutcomeRetried processOutcome = "retried" diff --git a/packages/dashboard-api/internal/supabaseauthusersync/processor.go b/packages/dashboard-api/internal/supabaseauthusersync/processor.go index e25d2a0c6c..0a6e9ab369 100644 --- a/packages/dashboard-api/internal/supabaseauthusersync/processor.go +++ b/packages/dashboard-api/internal/supabaseauthusersync/processor.go @@ -15,7 +15,6 @@ import ( ) type processorStore interface { - Ack(ctx context.Context, id int64) error Retry(ctx context.Context, id int64, backoff time.Duration, lastError string) error DeadLetter(ctx context.Context, id int64, lastError string) error GetAuthUser(ctx context.Context, userID uuid.UUID) (*AuthUser, error) @@ -46,21 +45,7 @@ func (p *Processor) 
process(ctx context.Context, item QueueItem) processResult { } if err == nil { - if ackErr := p.store.Ack(ctx, item.ID); ackErr != nil { - result.Outcome = processOutcomeAckFailed - - p.l.Error(ctx, "processed supabase auth sync queue item but failed to ack", - append( - processResultFields(item, result, time.Now()), - zap.NamedError("ack_error", ackErr), - )..., - ) - - return result - } - - result.Outcome = processOutcomeAcked - p.l.Info(ctx, "processed supabase auth sync queue item", processResultFields(item, result, time.Now())...) + result.Outcome = processOutcomeReadyToAck return result } @@ -140,6 +125,14 @@ func (p *Processor) processOnce(ctx context.Context, item QueueItem) (action rec } func (p *Processor) reconcile(ctx context.Context, item QueueItem) (reconcileAction, error) { + if item.Operation == "delete" { + if err := p.store.DeletePublicUser(ctx, item.UserID); err != nil { + return "", fmt.Errorf("delete public.users %s: %w", item.UserID, err) + } + + return reconcileActionDeletePublicUser, nil + } + authUser, err := p.store.GetAuthUser(ctx, item.UserID) if errors.Is(err, pgx.ErrNoRows) { diff --git a/packages/dashboard-api/internal/supabaseauthusersync/processor_test.go b/packages/dashboard-api/internal/supabaseauthusersync/processor_test.go index 125222d5ee..446653a3d0 100644 --- a/packages/dashboard-api/internal/supabaseauthusersync/processor_test.go +++ b/packages/dashboard-api/internal/supabaseauthusersync/processor_test.go @@ -25,15 +25,9 @@ type deadLetterCall struct { type fakeProcessorStore struct { getAuthUserFn func(context.Context, uuid.UUID) (*AuthUser, error) - ackCalls []int64 - retryCalls []retryCall - deadLetterCalls []deadLetterCall -} - -func (s *fakeProcessorStore) Ack(_ context.Context, id int64) error { - s.ackCalls = append(s.ackCalls, id) - - return nil + deletePublicUserCalls int + retryCalls []retryCall + deadLetterCalls []deadLetterCall } func (s *fakeProcessorStore) Retry(_ context.Context, id int64, backoff 
time.Duration, lastError string) error { @@ -64,6 +58,8 @@ func (s *fakeProcessorStore) UpsertPublicUser(_ context.Context, _ uuid.UUID, _ } func (s *fakeProcessorStore) DeletePublicUser(_ context.Context, _ uuid.UUID) error { + s.deletePublicUserCalls++ + return nil } @@ -85,7 +81,6 @@ func TestProcessorProcessRetriesRecoveredPanic(t *testing.T) { require.NotPanics(t, func() { processor.process(context.Background(), item) }) - require.Empty(t, store.ackCalls) require.Len(t, store.retryCalls, 1) require.Contains(t, store.retryCalls[0].lastError, "panic while processing queue item") require.Empty(t, store.deadLetterCalls) @@ -109,8 +104,34 @@ func TestProcessorProcessDeadLettersRecoveredPanicAtMaxAttempts(t *testing.T) { require.NotPanics(t, func() { processor.process(context.Background(), item) }) - require.Empty(t, store.ackCalls) require.Empty(t, store.retryCalls) require.Len(t, store.deadLetterCalls, 1) require.Contains(t, store.deadLetterCalls[0].lastError, "panic while processing queue item") } + +func TestProcessorProcessDeleteSkipsAuthLookup(t *testing.T) { + t.Parallel() + + getAuthUserCalled := false + store := &fakeProcessorStore{ + getAuthUserFn: func(context.Context, uuid.UUID) (*AuthUser, error) { + getAuthUserCalled = true + + return nil, nil + }, + } + processor := NewProcessor(store, 3, logger.NewNopLogger()) + item := QueueItem{ + ID: 1, + UserID: uuid.New(), + Operation: "delete", + AttemptCount: 1, + } + + result := processor.process(context.Background(), item) + + require.False(t, getAuthUserCalled) + require.Equal(t, 1, store.deletePublicUserCalls) + require.Equal(t, processOutcomeReadyToAck, result.Outcome) + require.Equal(t, reconcileActionDeletePublicUser, result.Action) +} diff --git a/packages/dashboard-api/internal/supabaseauthusersync/runner.go b/packages/dashboard-api/internal/supabaseauthusersync/runner.go index 174c976a84..37d014bcc6 100644 --- a/packages/dashboard-api/internal/supabaseauthusersync/runner.go +++ 
b/packages/dashboard-api/internal/supabaseauthusersync/runner.go @@ -13,6 +13,7 @@ import ( type runnerStore interface { ClaimBatch(ctx context.Context, lockOwner string, lockTimeout time.Duration, batchSize int32) ([]QueueItem, error) + AckBatch(ctx context.Context, ids []int64) error } type workerStore interface { @@ -28,6 +29,11 @@ type Runner struct { l logger.Logger } +type ackCandidate struct { + item QueueItem + result processResult +} + func NewRunner(cfg Config, authDB *authdb.Client, mainDB *sqlcdb.Client, lockOwner string, l logger.Logger) *Runner { workerLogger := l.With(logger.WithServiceInstanceID(lockOwner)) store := NewStore(authDB, mainDB) @@ -50,22 +56,39 @@ func (r *Runner) Run(ctx context.Context) error { zap.Int32("worker.max_attempts", r.cfg.MaxAttempts), ) - ticker := time.NewTicker(r.cfg.PollInterval) - defer ticker.Stop() - for { + r.drain(ctx) + if ctx.Err() != nil { + r.l.Info(ctx, "stopping supabase auth user sync worker", zap.Error(ctx.Err())) + + return ctx.Err() + } + + timer := time.NewTimer(r.cfg.PollInterval) select { case <-ctx.Done(): + if !timer.Stop() { + <-timer.C + } + r.l.Info(ctx, "stopping supabase auth user sync worker", zap.Error(ctx.Err())) return ctx.Err() - case <-ticker.C: - r.poll(ctx) + case <-timer.C: + } + } +} + +func (r *Runner) drain(ctx context.Context) { + for { + processed := r.pollOnce(ctx) + if processed == 0 { + return } } } -func (r *Runner) poll(ctx context.Context) { +func (r *Runner) pollOnce(ctx context.Context) int { claimedAt := time.Now() items, err := r.store.ClaimBatch(ctx, r.lockOwner, r.cfg.LockTimeout, r.cfg.BatchSize) if err != nil { @@ -76,18 +99,64 @@ func (r *Runner) poll(ctx context.Context) { zap.Error(err), ) - return + return 0 } if len(items) == 0 { - return + return 0 } summary := newBatchSummary(items, claimedAt) + ackCandidates := make([]ackCandidate, 0, len(items)) for _, item := range items { - summary.Add(r.processor.process(ctx, item)) + result := r.processor.process(ctx, 
item) + if result.Outcome == processOutcomeReadyToAck { + ackCandidates = append(ackCandidates, ackCandidate{ + item: item, + result: result, + }) + + continue + } + + summary.Add(result) + } + + if len(ackCandidates) > 0 { + r.finalizeAcks(ctx, ackCandidates, &summary) } r.l.Log(ctx, summary.Level(), "processed supabase auth sync queue batch", summary.Fields(time.Since(claimedAt))...) + + return len(items) +} + +func (r *Runner) finalizeAcks(ctx context.Context, candidates []ackCandidate, summary *batchSummary) { + ids := make([]int64, 0, len(candidates)) + for _, candidate := range candidates { + ids = append(ids, candidate.item.ID) + } + + if err := r.store.AckBatch(ctx, ids); err != nil { + for _, candidate := range candidates { + candidate.result.Outcome = processOutcomeAckFailed + summary.Add(candidate.result) + + r.l.Error(ctx, "processed supabase auth sync queue item but failed to ack", + append( + processResultFields(candidate.item, candidate.result, time.Now()), + zap.NamedError("ack_error", err), + )..., + ) + } + + return + } + + for _, candidate := range candidates { + candidate.result.Outcome = processOutcomeAcked + summary.Add(candidate.result) + r.l.Info(ctx, "processed supabase auth sync queue item", processResultFields(candidate.item, candidate.result, time.Now())...) 
+ } } diff --git a/packages/dashboard-api/internal/supabaseauthusersync/store.go b/packages/dashboard-api/internal/supabaseauthusersync/store.go index 9eee64ca93..63ebc988d0 100644 --- a/packages/dashboard-api/internal/supabaseauthusersync/store.go +++ b/packages/dashboard-api/internal/supabaseauthusersync/store.go @@ -64,8 +64,12 @@ func (s *Store) ClaimBatch(ctx context.Context, lockOwner string, lockTimeout ti return items, nil } -func (s *Store) Ack(ctx context.Context, id int64) error { - return s.authQueries.AckUserSyncQueueItem(ctx, id) +func (s *Store) AckBatch(ctx context.Context, ids []int64) error { + if len(ids) == 0 { + return nil + } + + return s.authQueries.AckUserSyncQueueItems(ctx, ids) } func (s *Store) Retry(ctx context.Context, id int64, backoff time.Duration, lastError string) error { diff --git a/packages/db/pkg/auth/queries/ack_batch.sql.go b/packages/db/pkg/auth/queries/ack_batch.sql.go new file mode 100644 index 0000000000..1b0d49a0af --- /dev/null +++ b/packages/db/pkg/auth/queries/ack_batch.sql.go @@ -0,0 +1,20 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.29.0 +// source: ack_batch.sql + +package authqueries + +import ( + "context" +) + +const ackUserSyncQueueItems = `-- name: AckUserSyncQueueItems :exec +DELETE FROM public.user_sync_queue +WHERE id = ANY($1::bigint[]) +` + +func (q *Queries) AckUserSyncQueueItems(ctx context.Context, ids []int64) error { + _, err := q.db.Exec(ctx, ackUserSyncQueueItems, ids) + return err +} diff --git a/packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/ack.sql b/packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/ack.sql deleted file mode 100644 index e0d7354dc9..0000000000 --- a/packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/ack.sql +++ /dev/null @@ -1,3 +0,0 @@ --- name: AckUserSyncQueueItem :exec -DELETE FROM public.user_sync_queue -WHERE id = sqlc.arg(id)::bigint; diff --git a/packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/ack_batch.sql b/packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/ack_batch.sql new file mode 100644 index 0000000000..45dd7f6e49 --- /dev/null +++ b/packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/ack_batch.sql @@ -0,0 +1,3 @@ +-- name: AckUserSyncQueueItems :exec +DELETE FROM public.user_sync_queue +WHERE id = ANY(sqlc.arg(ids)::bigint[]); From 3e3fa3cd70ad19afdc83c30c073574d922691539 Mon Sep 17 00:00:00 2001 From: ben-fornefeld Date: Tue, 31 Mar 2026 19:00:41 -0700 Subject: [PATCH 13/20] refactor(gcp): remove auth_db_connection_string resources and update references - Deleted the `auth_db_connection_string` secret and its version from the GCP configuration. - Updated references in `main.tf` and `nomad/main.tf` to use the `postgres_connection_string` instead. - Removed the corresponding variable declaration from `variables.tf` to clean up unused configurations. 
--- iac/provider-gcp/api.tf | 17 ----------------- iac/provider-gcp/main.tf | 1 - iac/provider-gcp/nomad/main.tf | 6 +----- iac/provider-gcp/nomad/variables.tf | 4 ---- 4 files changed, 1 insertion(+), 27 deletions(-) diff --git a/iac/provider-gcp/api.tf b/iac/provider-gcp/api.tf index c8b5586c96..26f1a1b408 100644 --- a/iac/provider-gcp/api.tf +++ b/iac/provider-gcp/api.tf @@ -27,23 +27,6 @@ resource "google_secret_manager_secret_version" "postgres_read_replica_connectio } } -resource "google_secret_manager_secret" "auth_db_connection_string" { - secret_id = "${var.prefix}auth-db-connection-string" - - replication { - auto {} - } -} - -resource "google_secret_manager_secret_version" "auth_db_connection_string" { - secret = google_secret_manager_secret.auth_db_connection_string.name - secret_data = " " - - lifecycle { - ignore_changes = [secret_data] - } -} - resource "random_password" "api_secret" { length = 32 special = false diff --git a/iac/provider-gcp/main.tf b/iac/provider-gcp/main.tf index 90294e554b..04f502e8a7 100644 --- a/iac/provider-gcp/main.tf +++ b/iac/provider-gcp/main.tf @@ -224,7 +224,6 @@ module "nomad" { api_secret = random_password.api_secret.result custom_envs_repository_name = google_artifact_registry_repository.custom_environments_repository.name postgres_connection_string_secret_name = module.init.postgres_connection_string_secret_name - auth_db_connection_string_secret_version = google_secret_manager_secret_version.auth_db_connection_string postgres_read_replica_connection_string_secret_version = google_secret_manager_secret_version.postgres_read_replica_connection_string supabase_jwt_secrets_secret_name = module.init.supabase_jwt_secret_name posthog_api_key_secret_name = module.init.posthog_api_key_secret_name diff --git a/iac/provider-gcp/nomad/main.tf b/iac/provider-gcp/nomad/main.tf index 7d05bf3dec..87f3593b7d 100644 --- a/iac/provider-gcp/nomad/main.tf +++ b/iac/provider-gcp/nomad/main.tf @@ -10,10 +10,6 @@ data 
"google_secret_manager_secret_version" "postgres_connection_string" { secret = var.postgres_connection_string_secret_name } -data "google_secret_manager_secret_version" "auth_db_connection_string" { - secret = var.auth_db_connection_string_secret_version.secret -} - data "google_secret_manager_secret_version" "postgres_read_replica_connection_string" { secret = var.postgres_read_replica_connection_string_secret_version.secret } @@ -135,7 +131,7 @@ module "dashboard_api" { image = data.google_artifact_registry_docker_image.dashboard_api_image[0].self_link postgres_connection_string = data.google_secret_manager_secret_version.postgres_connection_string.secret_data - auth_db_connection_string = trimspace(data.google_secret_manager_secret_version.auth_db_connection_string.secret_data) + auth_db_connection_string = data.google_secret_manager_secret_version.postgres_connection_string.secret_data auth_db_read_replica_connection_string = trimspace(data.google_secret_manager_secret_version.postgres_read_replica_connection_string.secret_data) clickhouse_connection_string = local.clickhouse_connection_string supabase_jwt_secrets = trimspace(data.google_secret_manager_secret_version.supabase_jwt_secrets.secret_data) diff --git a/iac/provider-gcp/nomad/variables.tf b/iac/provider-gcp/nomad/variables.tf index 64e9f7dd75..6d179f5c86 100644 --- a/iac/provider-gcp/nomad/variables.tf +++ b/iac/provider-gcp/nomad/variables.tf @@ -175,10 +175,6 @@ variable "postgres_connection_string_secret_name" { type = string } -variable "auth_db_connection_string_secret_version" { - type = any -} - variable "postgres_read_replica_connection_string_secret_version" { type = any } From 5e0972bf35b6f70443c4374ebf1422cdbe864967 Mon Sep 17 00:00:00 2001 From: ben-fornefeld Date: Tue, 31 Mar 2026 19:37:58 -0700 Subject: [PATCH 14/20] chore(dashboard-api): refactor environment variable management - Updated the dashboard API module to separate base and extra environment variables. 
- Introduced a precondition to prevent conflicts with reserved keys in extra environment variables. - Modified the HCL job configuration to iterate over environment variables dynamically. - Adjusted variable declarations to reflect the new structure for extra environment variables. --- .env.gcp.template | 3 +- .../job-dashboard-api/jobs/dashboard-api.hcl | 18 ++------ iac/modules/job-dashboard-api/main.tf | 43 ++++++++++++++----- iac/modules/job-dashboard-api/variables.tf | 2 +- iac/provider-gcp/nomad/main.tf | 2 +- 5 files changed, 40 insertions(+), 28 deletions(-) diff --git a/.env.gcp.template b/.env.gcp.template index 6f72d2d77c..5269692a3e 100644 --- a/.env.gcp.template +++ b/.env.gcp.template @@ -77,7 +77,8 @@ CLICKHOUSE_CLUSTER_SIZE=1 # Dashboard API instance count (default: 0) DASHBOARD_API_COUNT= -# Additional dashboard-api env vars passed directly to the Nomad job (default: {}) +# Additional non-reserved dashboard-api env vars passed directly to the Nomad job (default: {}) +# Reserved keys managed by the module cannot be overridden here. 
# Example: '{"SUPABASE_AUTH_USER_SYNC_ENABLED":"true"}' DASHBOARD_API_ENV_VARS= diff --git a/iac/modules/job-dashboard-api/jobs/dashboard-api.hcl b/iac/modules/job-dashboard-api/jobs/dashboard-api.hcl index 86a72cab50..f05aadcbb5 100644 --- a/iac/modules/job-dashboard-api/jobs/dashboard-api.hcl +++ b/iac/modules/job-dashboard-api/jobs/dashboard-api.hcl @@ -71,21 +71,9 @@ job "dashboard-api" { } env { - GIN_MODE = "release" - ENVIRONMENT = "${environment}" - NODE_ID = "$${node.unique.id}" - PORT = "$${NOMAD_PORT_api}" - POSTGRES_CONNECTION_STRING = "${postgres_connection_string}" - AUTH_DB_CONNECTION_STRING = "${auth_db_connection_string}" - AUTH_DB_READ_REPLICA_CONNECTION_STRING = "${auth_db_read_replica_connection_string}" - CLICKHOUSE_CONNECTION_STRING = "${clickhouse_connection_string}" - SUPABASE_JWT_SECRETS = "${supabase_jwt_secrets}" - OTEL_COLLECTOR_GRPC_ENDPOINT = "${otel_collector_grpc_endpoint}" - LOGS_COLLECTOR_ADDRESS = "${logs_collector_address}" - - %{ for key, val in env } - ${ key } = "${ val }" - %{ endfor } + %{ for key in sort(keys(env)) ~} + ${key} = "${env[key]}" + %{ endfor ~} } config { diff --git a/iac/modules/job-dashboard-api/main.tf b/iac/modules/job-dashboard-api/main.tf index aa577bfd2f..26dcc2a889 100644 --- a/iac/modules/job-dashboard-api/main.tf +++ b/iac/modules/job-dashboard-api/main.tf @@ -1,5 +1,29 @@ locals { - env = { for key, value in var.env : key => value if value != null && value != "" } + base_env = { + GIN_MODE = "release" + ENVIRONMENT = var.environment + NODE_ID = "$${node.unique.id}" + PORT = "$${NOMAD_PORT_api}" + POSTGRES_CONNECTION_STRING = var.postgres_connection_string + AUTH_DB_CONNECTION_STRING = var.auth_db_connection_string + AUTH_DB_READ_REPLICA_CONNECTION_STRING = var.auth_db_read_replica_connection_string + CLICKHOUSE_CONNECTION_STRING = var.clickhouse_connection_string + SUPABASE_JWT_SECRETS = var.supabase_jwt_secrets + OTEL_COLLECTOR_GRPC_ENDPOINT = "localhost:${var.otel_collector_grpc_port}" + 
LOGS_COLLECTOR_ADDRESS = "http://localhost:${var.logs_proxy_port.port}" + } + + extra_env = { + for key, value in var.extra_env : key => value + if value != null && trimspace(value) != "" + } + + conflicting_extra_env_keys = sort(tolist(setintersection( + toset(keys(local.base_env)), + toset(keys(local.extra_env)), + ))) + + env = merge(local.base_env, local.extra_env) } resource "nomad_job" "dashboard_api" { @@ -14,16 +38,15 @@ resource "nomad_job" "dashboard_api" { memory_mb = 512 cpu_count = 1 - postgres_connection_string = var.postgres_connection_string - auth_db_connection_string = var.auth_db_connection_string - auth_db_read_replica_connection_string = var.auth_db_read_replica_connection_string - clickhouse_connection_string = var.clickhouse_connection_string - supabase_jwt_secrets = var.supabase_jwt_secrets - env = local.env + env = local.env subdomain = "dashboard-api" - - otel_collector_grpc_endpoint = "localhost:${var.otel_collector_grpc_port}" - logs_collector_address = "http://localhost:${var.logs_proxy_port.port}" }) + + lifecycle { + precondition { + condition = length(local.conflicting_extra_env_keys) == 0 + error_message = "dashboard-api extra_env contains reserved keys: ${join(", ", local.conflicting_extra_env_keys)}" + } + } } diff --git a/iac/modules/job-dashboard-api/variables.tf b/iac/modules/job-dashboard-api/variables.tf index 625da7c82b..ab02574467 100644 --- a/iac/modules/job-dashboard-api/variables.tf +++ b/iac/modules/job-dashboard-api/variables.tf @@ -44,7 +44,7 @@ variable "supabase_jwt_secrets" { sensitive = true } -variable "env" { +variable "extra_env" { type = map(string) default = {} } diff --git a/iac/provider-gcp/nomad/main.tf b/iac/provider-gcp/nomad/main.tf index 87f3593b7d..21e17909b2 100644 --- a/iac/provider-gcp/nomad/main.tf +++ b/iac/provider-gcp/nomad/main.tf @@ -135,7 +135,7 @@ module "dashboard_api" { auth_db_read_replica_connection_string = 
trimspace(data.google_secret_manager_secret_version.postgres_read_replica_connection_string.secret_data) clickhouse_connection_string = local.clickhouse_connection_string supabase_jwt_secrets = trimspace(data.google_secret_manager_secret_version.supabase_jwt_secrets.secret_data) - env = var.dashboard_api_env_vars + extra_env = var.dashboard_api_env_vars otel_collector_grpc_port = var.otel_collector_grpc_port logs_proxy_port = var.logs_proxy_port From 6b5a5c8e33ad69348d3effab6db42926628bd788 Mon Sep 17 00:00:00 2001 From: ben-fornefeld Date: Wed, 1 Apr 2026 13:34:12 -0700 Subject: [PATCH 15/20] refactor: use river for queue worker --- packages/dashboard-api/Makefile | 10 +- packages/dashboard-api/go.mod | 14 +- packages/dashboard-api/go.sum | 24 + .../backgroundworker/auth_user_sync.go | 139 +++++ .../backgroundworker/auth_user_sync_test.go | 252 +++++++++ .../internal/backgroundworker/river.go | 38 ++ .../internal/supabaseauthusersync/config.go | 28 - .../internal/supabaseauthusersync/logging.go | 214 -------- .../supabaseauthusersync/processor.go | 170 ------ .../supabaseauthusersync/processor_test.go | 137 ----- .../internal/supabaseauthusersync/runner.go | 162 ------ .../supabaseauthusersync/runner_test.go | 494 ------------------ .../internal/supabaseauthusersync/store.go | 115 ---- .../supabaseauthusersync/supervisor.go | 117 ----- .../supabaseauthusersync/supervisor_test.go | 77 --- packages/dashboard-api/main.go | 49 +- packages/db/pkg/auth/client.go | 4 + ...1000003_river_auth_user_sync_triggers.sql} | 104 ++-- packages/db/pkg/auth/queries/models.go | 13 - .../supabase_auth_user_sync/ack_batch.sql | 3 - .../supabase_auth_user_sync/claim_batch.sql | 17 - .../supabase_auth_user_sync/dead_letter.sql | 8 - .../supabase_auth_user_sync/get_auth_user.sql | 4 - .../supabase_auth_user_sync/retry.sql | 8 - packages/db/pkg/testutils/db.go | 40 +- 25 files changed, 592 insertions(+), 1649 deletions(-) create mode 100644 
packages/dashboard-api/internal/backgroundworker/auth_user_sync.go create mode 100644 packages/dashboard-api/internal/backgroundworker/auth_user_sync_test.go create mode 100644 packages/dashboard-api/internal/backgroundworker/river.go delete mode 100644 packages/dashboard-api/internal/supabaseauthusersync/config.go delete mode 100644 packages/dashboard-api/internal/supabaseauthusersync/logging.go delete mode 100644 packages/dashboard-api/internal/supabaseauthusersync/processor.go delete mode 100644 packages/dashboard-api/internal/supabaseauthusersync/processor_test.go delete mode 100644 packages/dashboard-api/internal/supabaseauthusersync/runner.go delete mode 100644 packages/dashboard-api/internal/supabaseauthusersync/runner_test.go delete mode 100644 packages/dashboard-api/internal/supabaseauthusersync/store.go delete mode 100644 packages/dashboard-api/internal/supabaseauthusersync/supervisor.go delete mode 100644 packages/dashboard-api/internal/supabaseauthusersync/supervisor_test.go rename packages/db/pkg/auth/migrations/{20260328000001_dashboard_supabase_auth_user_sync_queue.sql => 20260401000003_river_auth_user_sync_triggers.sql} (66%) delete mode 100644 packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/ack_batch.sql delete mode 100644 packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/claim_batch.sql delete mode 100644 packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/dead_letter.sql delete mode 100644 packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/get_auth_user.sql delete mode 100644 packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/retry.sql diff --git a/packages/dashboard-api/Makefile b/packages/dashboard-api/Makefile index 5ce6efda2a..1dfef7c5fe 100644 --- a/packages/dashboard-api/Makefile +++ b/packages/dashboard-api/Makefile @@ -13,6 +13,10 @@ endif HOSTNAME := $(shell hostname 2> /dev/null || hostnamectl hostname 2> /dev/null) $(if $(HOSTNAME),,$(error Failed to determine hostname: both 'hostname' and 'hostnamectl' 
failed)) +define DASHBOARD_API_EXTRA_ENV +$$(printf '%s' "$${DASHBOARD_API_ENV_VARS:-}" | jq -r '(if .=="" then empty elif type=="string" then (fromjson? // empty) else . end) | to_entries? // [] | map("\(.key)=\(.value|tostring|@sh)") | join(" ")') +endef + .PHONY: generate generate: go generate ./... @@ -33,12 +37,14 @@ build-and-upload: .PHONY: run run: make build - ./bin/dashboard-api + @EXTRA_ENV=$(DASHBOARD_API_EXTRA_ENV); \ + eval "env $$EXTRA_ENV ./bin/dashboard-api" .PHONY: run-local run-local: make build - NODE_ID=$(HOSTNAME) ./bin/dashboard-api + @EXTRA_ENV=$(DASHBOARD_API_EXTRA_ENV); \ + eval "env NODE_ID=$(HOSTNAME) $$EXTRA_ENV ./bin/dashboard-api" .PHONY: test test: diff --git a/packages/dashboard-api/go.mod b/packages/dashboard-api/go.mod index c92076b4f9..79457e0125 100644 --- a/packages/dashboard-api/go.mod +++ b/packages/dashboard-api/go.mod @@ -20,7 +20,7 @@ require ( github.com/gin-contrib/cors v1.7.6 github.com/gin-gonic/gin v1.10.1 github.com/google/uuid v1.6.0 - github.com/jackc/pgx/v5 v5.7.5 + github.com/jackc/pgx/v5 v5.9.1 github.com/oapi-codegen/gin-middleware v1.0.2 github.com/oapi-codegen/runtime v1.1.1 github.com/stretchr/testify v1.11.1 @@ -113,6 +113,11 @@ require ( github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/pressly/goose/v3 v3.26.0 // indirect github.com/redis/go-redis/v9 v9.17.3 // indirect + github.com/riverqueue/river v0.32.0 // indirect + github.com/riverqueue/river/riverdriver v0.32.0 // indirect + github.com/riverqueue/river/riverdriver/riverpgxv5 v0.32.0 // indirect + github.com/riverqueue/river/rivershared v0.32.0 // indirect + github.com/riverqueue/river/rivertype v0.32.0 // indirect github.com/segmentio/asm v1.2.0 // indirect github.com/sethvargo/go-retry v0.3.0 // indirect github.com/shirou/gopsutil/v4 v4.25.9 // indirect @@ -120,6 +125,10 @@ require ( github.com/sirupsen/logrus v1.9.3 // indirect github.com/testcontainers/testcontainers-go v0.40.0 // indirect 
github.com/testcontainers/testcontainers-go/modules/postgres v0.39.0 // indirect + github.com/tidwall/gjson v1.18.0 // indirect + github.com/tidwall/match v1.2.0 // indirect + github.com/tidwall/pretty v1.2.1 // indirect + github.com/tidwall/sjson v1.2.5 // indirect github.com/tklauser/go-sysconf v0.3.15 // indirect github.com/tklauser/numcpus v0.10.0 // indirect github.com/twitchyliquid64/golang-asm v0.15.1 // indirect @@ -142,10 +151,11 @@ require ( go.opentelemetry.io/otel/sdk/metric v1.41.0 // indirect go.opentelemetry.io/otel/trace v1.41.0 // indirect go.opentelemetry.io/proto/otlp v1.9.0 // indirect + go.uber.org/goleak v1.3.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/arch v0.18.0 // indirect golang.org/x/crypto v0.48.0 // indirect - golang.org/x/mod v0.33.0 // indirect + golang.org/x/mod v0.34.0 // indirect golang.org/x/net v0.50.0 // indirect golang.org/x/sync v0.20.0 // indirect golang.org/x/sys v0.41.0 // indirect diff --git a/packages/dashboard-api/go.sum b/packages/dashboard-api/go.sum index 8bc4994cc9..d724d1c16c 100644 --- a/packages/dashboard-api/go.sum +++ b/packages/dashboard-api/go.sum @@ -141,6 +141,8 @@ github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7Ulw github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs= github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= +github.com/jackc/pgx/v5 v5.9.1 h1:uwrxJXBnx76nyISkhr33kQLlUqjv7et7b9FjCen/tdc= +github.com/jackc/pgx/v5 v5.9.1/go.mod h1:mal1tBGAFfLHvZzaYh77YS/eC6IX9OWbRV1QIIM0Jn4= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jellydator/ttlcache/v3 v3.4.0 h1:YS4P125qQS0tNhtL6aeYkheEaB/m8HCqdMMP4mnWdTY= @@ -245,6 +247,16 @@ 
github.com/redis/go-redis/v9 v9.17.3 h1:fN29NdNrE17KttK5Ndf20buqfDZwGNgoUr9qjl1D github.com/redis/go-redis/v9 v9.17.3/go.mod h1:u410H11HMLoB+TP67dz8rL9s6QW2j76l0//kSOd3370= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/riverqueue/river v0.32.0 h1:j15EoFZ4oQWXcCq8NyzWwoi3fdaO8mECTB100NSv9Qw= +github.com/riverqueue/river v0.32.0/go.mod h1:zABAdLze3HI7K02N+veikXyK5FjiLzjimnQpZ1Duyng= +github.com/riverqueue/river/riverdriver v0.32.0 h1:AG6a2hNVOIGLx/+3IRtbwofJRYEI7xqnVVxULe9s4Lg= +github.com/riverqueue/river/riverdriver v0.32.0/go.mod h1:FRDMuqnLOsakeJOHlozKK+VH7W7NLp+6EToxQ2JAjBE= +github.com/riverqueue/river/riverdriver/riverpgxv5 v0.32.0 h1:CqrRxxcdA/0sHkxLNldsQff9DIG5qxn2EJO09Pau3w0= +github.com/riverqueue/river/riverdriver/riverpgxv5 v0.32.0/go.mod h1:j45UPpbMpcI10m+huTeNUaOwzoLJcEg0K6ihWXWeOec= +github.com/riverqueue/river/rivershared v0.32.0 h1:7DwdrppMU9uoU2iU9aGQiv91nBezjlcI85NV4PmnLHw= +github.com/riverqueue/river/rivershared v0.32.0/go.mod h1:UE7GEj3zaTV3cKw7Q3angCozlNEGsL50xZBKJQ9m6zU= +github.com/riverqueue/river/rivertype v0.32.0 h1:RW7uodfl86gYkjwDponTAPNnUqM+X6BjlsNHxbt6Ztg= +github.com/riverqueue/river/rivertype v0.32.0/go.mod h1:D1Ad+EaZiaXbQbJcJcfeicXJMBKno0n6UcfKI5Q7DIQ= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= @@ -275,7 +287,18 @@ github.com/testcontainers/testcontainers-go v0.40.0 h1:pSdJYLOVgLE8YdUY2FHQ1Fxu+ github.com/testcontainers/testcontainers-go v0.40.0/go.mod h1:FSXV5KQtX2HAMlm7U3APNyLkkap35zNLxukw9oBi/MY= github.com/testcontainers/testcontainers-go/modules/postgres v0.39.0 h1:REJz+XwNpGC/dCgTfYvM4SKqobNqDBfvhq74s2oHTUM= 
github.com/testcontainers/testcontainers-go/modules/postgres v0.39.0/go.mod h1:4K2OhtHEeT+JSIFX4V8DkGKsyLa96Y2vLdd3xsxD5HE= +github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/match v1.2.0 h1:0pt8FlkOwjN2fPt4bIl4BoNxb98gGHN2ObFEDkrfZnM= +github.com/tidwall/match v1.2.0/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= +github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= github.com/tklauser/go-sysconf v0.3.15 h1:VE89k0criAymJ/Os65CSn1IXaol+1wrsFHEB8Ol49K4= github.com/tklauser/go-sysconf v0.3.15/go.mod h1:Dmjwr6tYFIseJw7a3dRLJfsHAMXZ3nEnL/aZY+0IuI4= github.com/tklauser/numcpus v0.10.0 h1:18njr6LDBk1zuna922MgdjQuJFjrdppsZG60sHGfjso= @@ -355,6 +378,7 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= +golang.org/x/mod v0.34.0/go.mod h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net 
v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= diff --git a/packages/dashboard-api/internal/backgroundworker/auth_user_sync.go b/packages/dashboard-api/internal/backgroundworker/auth_user_sync.go new file mode 100644 index 0000000000..f999926ae2 --- /dev/null +++ b/packages/dashboard-api/internal/backgroundworker/auth_user_sync.go @@ -0,0 +1,139 @@ +package backgroundworker + +import ( + "context" + "fmt" + + "github.com/google/uuid" + "github.com/riverqueue/river" + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric" + "go.uber.org/zap" + + sqlcdb "github.com/e2b-dev/infra/packages/db/client" + "github.com/e2b-dev/infra/packages/db/queries" + "github.com/e2b-dev/infra/packages/shared/pkg/logger" + "github.com/e2b-dev/infra/packages/shared/pkg/telemetry" +) + +const AuthUserSyncKind = "auth_user_sync" + +type AuthUserSyncArgs struct { + UserID string `json:"user_id"` + Operation string `json:"operation"` + Email string `json:"email,omitempty"` +} + +func (AuthUserSyncArgs) Kind() string { return AuthUserSyncKind } + +type AuthUserSyncWorker struct { + river.WorkerDefaults[AuthUserSyncArgs] + + mainDB *sqlcdb.Client + l logger.Logger + jobsCounter metric.Int64Counter +} + +func NewAuthUserSyncWorker(mainDB *sqlcdb.Client, l logger.Logger) *AuthUserSyncWorker { + jobsCounter, err := otel.Meter("dashboard-api.backgroundworker.auth_user_sync").Int64Counter( + "dashboard_api.auth_user_sync.jobs_total", + metric.WithDescription("Total auth user sync jobs by operation and result."), + metric.WithUnit("{job}"), + ) + if err != nil { + l.Warn(context.Background(), "failed to initialize auth user sync metric", zap.Error(err)) + } + + return &AuthUserSyncWorker{ + mainDB: mainDB, + l: l, + jobsCounter: jobsCounter, + } +} + +func (w *AuthUserSyncWorker) Work(ctx context.Context, 
job *river.Job[AuthUserSyncArgs]) error { + attrs := []attribute.KeyValue{ + attribute.String("job.kind", AuthUserSyncKind), + attribute.String("job.operation", job.Args.Operation), + attribute.Int64("job.id", job.ID), + telemetry.WithUserID(job.Args.UserID), + } + telemetry.ReportEvent(ctx, "auth_user_sync.job.started", attrs...) + + userID, err := uuid.Parse(job.Args.UserID) + if err != nil { + telemetry.ReportError(ctx, "auth user sync parse user_id", err, attrs...) + w.observeJob(ctx, job.Args.Operation, "error") + + return fmt.Errorf("parse user_id %q: %w", job.Args.UserID, err) + } + + w.l.Info(ctx, "processing auth user sync job", + zap.String("job.kind", AuthUserSyncKind), + zap.Int64("job.id", job.ID), + zap.String("job.operation", job.Args.Operation), + logger.WithUserID(job.Args.UserID), + zap.Int("job.attempt", job.Attempt), + ) + + switch job.Args.Operation { + case "delete": + if err := w.mainDB.DeletePublicUser(ctx, userID); err != nil { + telemetry.ReportError(ctx, "auth user sync delete public user", err, attrs...) + w.observeJob(ctx, job.Args.Operation, "error") + + return fmt.Errorf("delete public.users %s: %w", userID, err) + } + + case "upsert": + if job.Args.Email == "" { + err := fmt.Errorf("missing email in job args") + telemetry.ReportError(ctx, "auth user sync missing email", err, attrs...) + w.observeJob(ctx, job.Args.Operation, "error") + + return fmt.Errorf("upsert public.users %s: missing email in job args", userID) + } + + if err := w.mainDB.UpsertPublicUser(ctx, queries.UpsertPublicUserParams{ + ID: userID, + Email: job.Args.Email, + }); err != nil { + telemetry.ReportError(ctx, "auth user sync upsert public user", err, attrs...) + w.observeJob(ctx, job.Args.Operation, "error") + + return fmt.Errorf("upsert public.users %s: %w", userID, err) + } + + default: + err := fmt.Errorf("unknown operation %q", job.Args.Operation) + telemetry.ReportError(ctx, "auth user sync unknown operation", err, attrs...) 
+ w.observeJob(ctx, job.Args.Operation, "error") + + return fmt.Errorf("unknown operation %q for user %s", job.Args.Operation, userID) + } + + w.l.Info(ctx, "completed auth user sync job", + zap.String("job.kind", AuthUserSyncKind), + zap.Int64("job.id", job.ID), + zap.String("job.operation", job.Args.Operation), + logger.WithUserID(job.Args.UserID), + ) + telemetry.ReportEvent(ctx, "auth_user_sync.job.completed", attrs...) + w.observeJob(ctx, job.Args.Operation, "success") + + return nil +} + +func (w *AuthUserSyncWorker) observeJob(ctx context.Context, operation, result string) { + if w.jobsCounter == nil { + return + } + + w.jobsCounter.Add(ctx, 1, metric.WithAttributes( + attribute.String("worker", "supabase_auth_user_sync"), + attribute.String("job.kind", AuthUserSyncKind), + attribute.String("job.operation", operation), + attribute.String("result", result), + )) +} diff --git a/packages/dashboard-api/internal/backgroundworker/auth_user_sync_test.go b/packages/dashboard-api/internal/backgroundworker/auth_user_sync_test.go new file mode 100644 index 0000000000..7d0e928b66 --- /dev/null +++ b/packages/dashboard-api/internal/backgroundworker/auth_user_sync_test.go @@ -0,0 +1,252 @@ +package backgroundworker + +import ( + "context" + "fmt" + "sync" + "testing" + "time" + + "github.com/google/uuid" + "github.com/jackc/pgx/v5" + "github.com/riverqueue/river" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/e2b-dev/infra/packages/db/pkg/testutils" + "github.com/e2b-dev/infra/packages/shared/pkg/logger" +) + +const ( + testEventuallyTimeout = 10 * time.Second + testEventuallyTick = 50 * time.Millisecond + testStopTimeout = 5 * time.Second +) + +type riverProcess struct { + cancel context.CancelFunc + done chan struct{} + stopOnce sync.Once +} + +func TestAuthUserSync_EndToEnd(t *testing.T) { + t.Parallel() + + db := testutils.SetupDatabase(t) + + authMigrationsDir := "packages/db/pkg/auth/migrations" + + 
db.ApplyMigrationsUpTo(t, 20260401000001, authMigrationsDir) + + authPool := db.AuthDb.WritePool() + require.NoError(t, RunRiverMigrations(t.Context(), authPool)) + + db.ApplyMigrations(t, authMigrationsDir) + + runUpsertProjection(t, db) + runDeleteProjection(t, db) + runBurstBacklog(t, db) +} + +func runUpsertProjection(t *testing.T, db *testutils.Database) { + t.Helper() + + ctx := t.Context() + userID := uuid.New() + email := fmt.Sprintf("river-sync-%s@example.com", userID.String()[:8]) + + proc := startRiverWorker(t, db) + t.Cleanup(func() { proc.Stop(t) }) + + insertAuthUser(t, ctx, db, userID, email) + + waitForPublicUser(t, ctx, db, userID, email) + + updatedEmail := fmt.Sprintf("river-sync-%s-updated@example.com", userID.String()[:8]) + updateAuthUserEmail(t, ctx, db, userID, updatedEmail) + + waitForPublicUser(t, ctx, db, userID, updatedEmail) + + proc.Stop(t) +} + +func runDeleteProjection(t *testing.T, db *testutils.Database) { + t.Helper() + + ctx := t.Context() + userID := uuid.New() + email := fmt.Sprintf("river-del-%s@example.com", userID.String()[:8]) + + proc := startRiverWorker(t, db) + t.Cleanup(func() { proc.Stop(t) }) + + insertAuthUser(t, ctx, db, userID, email) + waitForPublicUser(t, ctx, db, userID, email) + + deleteAuthUser(t, ctx, db, userID) + waitForPublicUserGone(t, ctx, db, userID) + + proc.Stop(t) +} + +func runBurstBacklog(t *testing.T, db *testutils.Database) { + t.Helper() + + ctx := t.Context() + const userCount = 40 + + type testUser struct { + id uuid.UUID + email string + shouldDel bool + } + + users := make([]testUser, 0, userCount) + for i := range userCount { + u := testUser{ + id: uuid.New(), + email: fmt.Sprintf("river-burst-%02d@example.com", i), + shouldDel: i%3 == 0, + } + users = append(users, u) + insertAuthUser(t, ctx, db, u.id, u.email) + } + + proc := startRiverWorker(t, db) + t.Cleanup(func() { proc.Stop(t) }) + + for _, u := range users { + waitForPublicUser(t, ctx, db, u.id, u.email) + } + + for _, u := range 
users { + if u.shouldDel { + deleteAuthUser(t, ctx, db, u.id) + } + } + + for _, u := range users { + if u.shouldDel { + waitForPublicUserGone(t, ctx, db, u.id) + } else { + waitForPublicUser(t, ctx, db, u.id, u.email) + } + } + + proc.Stop(t) +} + +func startRiverWorker(t *testing.T, db *testutils.Database) *riverProcess { + t.Helper() + + authPool := db.AuthDb.WritePool() + l := logger.NewNopLogger() + + workers := river.NewWorkers() + river.AddWorker(workers, NewAuthUserSyncWorker(db.SqlcClient, l)) + + client, err := NewRiverClient(authPool, workers) + require.NoError(t, err) + + ctx, cancel := context.WithCancel(context.Background()) + require.NoError(t, client.Start(ctx)) + + done := make(chan struct{}) + + go func() { + <-ctx.Done() + stopCtx, stopCancel := context.WithTimeout(context.WithoutCancel(ctx), testStopTimeout) + defer stopCancel() + + _ = client.Stop(stopCtx) + close(done) + }() + + return &riverProcess{cancel: cancel, done: done} +} + +func (p *riverProcess) Stop(t *testing.T) { + t.Helper() + + p.stopOnce.Do(func() { + p.cancel() + + select { + case <-p.done: + case <-time.After(testStopTimeout): + t.Fatal("river client did not stop in time") + } + }) +} + +func insertAuthUser(t *testing.T, ctx context.Context, db *testutils.Database, userID uuid.UUID, email string) { + t.Helper() + + err := db.AuthDb.TestsRawSQL(ctx, + "INSERT INTO auth.users (id, email) VALUES ($1, $2)", userID, email) + require.NoError(t, err) +} + +func updateAuthUserEmail(t *testing.T, ctx context.Context, db *testutils.Database, userID uuid.UUID, email string) { + t.Helper() + + err := db.AuthDb.TestsRawSQL(ctx, + "UPDATE auth.users SET email = $1 WHERE id = $2", email, userID) + require.NoError(t, err) +} + +func deleteAuthUser(t *testing.T, ctx context.Context, db *testutils.Database, userID uuid.UUID) { + t.Helper() + + err := db.AuthDb.TestsRawSQL(ctx, + "DELETE FROM auth.users WHERE id = $1", userID) + require.NoError(t, err) +} + +func waitForPublicUser(t *testing.T, 
ctx context.Context, db *testutils.Database, userID uuid.UUID, expectedEmail string) { + t.Helper() + + require.EventuallyWithT(t, func(c *assert.CollectT) { + var email string + + err := db.AuthDb.TestsRawSQLQuery(ctx, + "SELECT email FROM public.users WHERE id = $1", + func(rows pgx.Rows) error { + if !rows.Next() { + return fmt.Errorf("user %s not found in public.users", userID) + } + + return rows.Scan(&email) + }, userID) + + if !assert.NoError(c, err) { + return + } + + assert.Equal(c, expectedEmail, email) + }, testEventuallyTimeout, testEventuallyTick) +} + +func waitForPublicUserGone(t *testing.T, ctx context.Context, db *testutils.Database, userID uuid.UUID) { + t.Helper() + + require.EventuallyWithT(t, func(c *assert.CollectT) { + var count int + + err := db.AuthDb.TestsRawSQLQuery(ctx, + "SELECT count(*) FROM public.users WHERE id = $1", + func(rows pgx.Rows) error { + if !rows.Next() { + return nil + } + + return rows.Scan(&count) + }, userID) + + if !assert.NoError(c, err) { + return + } + + assert.Equal(c, 0, count) + }, testEventuallyTimeout, testEventuallyTick) +} diff --git a/packages/dashboard-api/internal/backgroundworker/river.go b/packages/dashboard-api/internal/backgroundworker/river.go new file mode 100644 index 0000000000..a248833067 --- /dev/null +++ b/packages/dashboard-api/internal/backgroundworker/river.go @@ -0,0 +1,38 @@ +package backgroundworker + +import ( + "context" + + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" + "github.com/riverqueue/river" + "github.com/riverqueue/river/riverdriver/riverpgxv5" + "github.com/riverqueue/river/rivermigrate" +) + +const AuthCustomSchema = "auth_custom" + +func RunRiverMigrations(ctx context.Context, pool *pgxpool.Pool) error { + driver := riverpgxv5.New(pool) + + migrator, err := rivermigrate.New(driver, &rivermigrate.Config{ + Schema: AuthCustomSchema, + }) + if err != nil { + return err + } + + _, err = migrator.Migrate(ctx, rivermigrate.DirectionUp, nil) + + return err +} + 
+func NewRiverClient(pool *pgxpool.Pool, workers *river.Workers) (*river.Client[pgx.Tx], error) { + return river.NewClient(riverpgxv5.New(pool), &river.Config{ + Schema: AuthCustomSchema, + Queues: map[string]river.QueueConfig{ + "auth_sync": {MaxWorkers: 10}, + }, + Workers: workers, + }) +} diff --git a/packages/dashboard-api/internal/supabaseauthusersync/config.go b/packages/dashboard-api/internal/supabaseauthusersync/config.go deleted file mode 100644 index 778be6d592..0000000000 --- a/packages/dashboard-api/internal/supabaseauthusersync/config.go +++ /dev/null @@ -1,28 +0,0 @@ -package supabaseauthusersync - -import "time" - -const ( - defaultBatchSize int32 = 50 - defaultPollInterval time.Duration = 2 * time.Second - defaultLockTimeout time.Duration = 2 * time.Minute - defaultMaxAttempts int32 = 20 -) - -type Config struct { - Enabled bool - BatchSize int32 - PollInterval time.Duration - LockTimeout time.Duration - MaxAttempts int32 -} - -func DefaultConfig() Config { - return Config{ - Enabled: false, - BatchSize: defaultBatchSize, - PollInterval: defaultPollInterval, - LockTimeout: defaultLockTimeout, - MaxAttempts: defaultMaxAttempts, - } -} diff --git a/packages/dashboard-api/internal/supabaseauthusersync/logging.go b/packages/dashboard-api/internal/supabaseauthusersync/logging.go deleted file mode 100644 index 06759556e6..0000000000 --- a/packages/dashboard-api/internal/supabaseauthusersync/logging.go +++ /dev/null @@ -1,214 +0,0 @@ -package supabaseauthusersync - -import ( - "sort" - "time" - - "go.uber.org/zap" - "go.uber.org/zap/zapcore" - - "github.com/e2b-dev/infra/packages/shared/pkg/logger" -) - -type processOutcome string - -const ( - processOutcomeReadyToAck processOutcome = "ready_to_ack" - processOutcomeAcked processOutcome = "acked" - processOutcomeAckFailed processOutcome = "ack_failed" - processOutcomeRetried processOutcome = "retried" - processOutcomeRetryFailed processOutcome = "retry_failed" - processOutcomeDeadLettered processOutcome = 
"dead_lettered" - processOutcomeDeadLetterFailed processOutcome = "dead_letter_failed" -) - -type reconcileAction string - -const ( - reconcileActionUpsertPublicUser reconcileAction = "upsert_public_user" - reconcileActionDeletePublicUser reconcileAction = "delete_public_user" -) - -type processResult struct { - Outcome processOutcome - Action reconcileAction - Duration time.Duration - Backoff time.Duration -} - -type batchSummary struct { - ClaimedCount int - AckedCount int - AckFailedCount int - RetriedCount int - RetryFailedCount int - DeadLetteredCount int - DeadLetterFailedCount int - MaxAttemptCount int32 - OldestCreatedAt time.Time - NewestCreatedAt time.Time - OldestItemAge time.Duration - NewestItemAge time.Duration - OperationCounts map[string]int - ActionCounts map[string]int -} - -func newBatchSummary(items []QueueItem, now time.Time) batchSummary { - summary := batchSummary{ - ClaimedCount: len(items), - OperationCounts: make(map[string]int), - ActionCounts: make(map[string]int), - } - - for i, item := range items { - if i == 0 || item.AttemptCount > summary.MaxAttemptCount { - summary.MaxAttemptCount = item.AttemptCount - } - - summary.OperationCounts[item.Operation]++ - - if item.CreatedAt.IsZero() { - continue - } - - if summary.OldestCreatedAt.IsZero() || item.CreatedAt.Before(summary.OldestCreatedAt) { - summary.OldestCreatedAt = item.CreatedAt - } - if summary.NewestCreatedAt.IsZero() || item.CreatedAt.After(summary.NewestCreatedAt) { - summary.NewestCreatedAt = item.CreatedAt - } - } - - if !summary.OldestCreatedAt.IsZero() { - summary.OldestItemAge = ageSince(summary.OldestCreatedAt, now) - summary.NewestItemAge = ageSince(summary.NewestCreatedAt, now) - } - - return summary -} - -func (s *batchSummary) Add(result processResult) { - switch result.Outcome { - case processOutcomeAcked: - s.AckedCount++ - case processOutcomeAckFailed: - s.AckFailedCount++ - case processOutcomeRetried: - s.RetriedCount++ - case processOutcomeRetryFailed: - 
s.RetryFailedCount++ - case processOutcomeDeadLettered: - s.DeadLetteredCount++ - case processOutcomeDeadLetterFailed: - s.DeadLetterFailedCount++ - } - - if result.Action != "" { - s.ActionCounts[string(result.Action)]++ - } -} - -func (s *batchSummary) Fields(totalDuration time.Duration) []zap.Field { - fields := []zap.Field{ - zap.Int("queue_batch.claimed_count", s.ClaimedCount), - zap.Int("queue_batch.acked_count", s.AckedCount), - zap.Int("queue_batch.ack_failed_count", s.AckFailedCount), - zap.Int("queue_batch.retried_count", s.RetriedCount), - zap.Int("queue_batch.retry_failed_count", s.RetryFailedCount), - zap.Int("queue_batch.dead_lettered_count", s.DeadLetteredCount), - zap.Int("queue_batch.dead_letter_failed_count", s.DeadLetterFailedCount), - zap.Int32("queue_batch.max_attempt", s.MaxAttemptCount), - zap.Duration("queue_batch.duration", totalDuration), - } - - if !s.OldestCreatedAt.IsZero() { - fields = append(fields, - logger.Time("queue_batch.oldest_item_created_at", s.OldestCreatedAt), - logger.Time("queue_batch.newest_item_created_at", s.NewestCreatedAt), - zap.Duration("queue_batch.oldest_item_age", s.OldestItemAge), - zap.Duration("queue_batch.newest_item_age", s.NewestItemAge), - ) - } - - if len(s.OperationCounts) > 0 { - fields = append(fields, zap.Object("queue_batch.operation_counts", countsField(s.OperationCounts))) - } - if len(s.ActionCounts) > 0 { - fields = append(fields, zap.Object("queue_batch.action_counts", countsField(s.ActionCounts))) - } - - return fields -} - -func (s *batchSummary) Level() zapcore.Level { - if s.AckFailedCount > 0 || s.RetryFailedCount > 0 || s.DeadLetteredCount > 0 || s.DeadLetterFailedCount > 0 { - return zap.ErrorLevel - } - if s.RetriedCount > 0 { - return zap.WarnLevel - } - - return zap.InfoLevel -} - -func processResultFields(item QueueItem, result processResult, now time.Time) []zap.Field { - fields := queueItemFields(item, now) - fields = append(fields, - zap.String("queue_item.outcome", 
string(result.Outcome)), - zap.Duration("queue_item.duration", result.Duration), - ) - - if result.Action != "" { - fields = append(fields, zap.String("queue_item.action", string(result.Action))) - } - if result.Backoff > 0 { - fields = append(fields, - zap.Duration("queue_item.retry_backoff", result.Backoff), - zap.Int32("queue_item.next_attempt", item.AttemptCount+1), - ) - } - - return fields -} - -func queueItemFields(item QueueItem, now time.Time) []zap.Field { - fields := []zap.Field{ - zap.Int64("queue_item.id", item.ID), - logger.WithUserID(item.UserID.String()), - zap.String("queue_item.operation", item.Operation), - zap.Int32("queue_item.attempt", item.AttemptCount), - } - - if !item.CreatedAt.IsZero() { - fields = append(fields, - logger.Time("queue_item.created_at", item.CreatedAt), - zap.Duration("queue_item.age", ageSince(item.CreatedAt, now)), - ) - } - - return fields -} - -func ageSince(createdAt time.Time, now time.Time) time.Duration { - if createdAt.IsZero() || now.Before(createdAt) { - return 0 - } - - return now.Sub(createdAt) -} - -type countsField map[string]int - -func (f countsField) MarshalLogObject(enc zapcore.ObjectEncoder) error { - keys := make([]string, 0, len(f)) - for key := range f { - keys = append(keys, key) - } - sort.Strings(keys) - - for _, key := range keys { - enc.AddInt(key, f[key]) - } - - return nil -} diff --git a/packages/dashboard-api/internal/supabaseauthusersync/processor.go b/packages/dashboard-api/internal/supabaseauthusersync/processor.go deleted file mode 100644 index 0a6e9ab369..0000000000 --- a/packages/dashboard-api/internal/supabaseauthusersync/processor.go +++ /dev/null @@ -1,170 +0,0 @@ -package supabaseauthusersync - -import ( - "context" - "errors" - "fmt" - "runtime/debug" - "time" - - "github.com/google/uuid" - "github.com/jackc/pgx/v5" - "go.uber.org/zap" - - "github.com/e2b-dev/infra/packages/shared/pkg/logger" -) - -type processorStore interface { - Retry(ctx context.Context, id int64, backoff 
time.Duration, lastError string) error - DeadLetter(ctx context.Context, id int64, lastError string) error - GetAuthUser(ctx context.Context, userID uuid.UUID) (*AuthUser, error) - UpsertPublicUser(ctx context.Context, id uuid.UUID, email string) error - DeletePublicUser(ctx context.Context, id uuid.UUID) error -} - -type Processor struct { - store processorStore - maxAttempts int32 - l logger.Logger -} - -func NewProcessor(store processorStore, maxAttempts int32, l logger.Logger) *Processor { - return &Processor{ - store: store, - maxAttempts: maxAttempts, - l: l, - } -} - -func (p *Processor) process(ctx context.Context, item QueueItem) processResult { - startedAt := time.Now() - action, err := p.processOnce(ctx, item) - result := processResult{ - Action: action, - Duration: time.Since(startedAt), - } - - if err == nil { - result.Outcome = processOutcomeReadyToAck - - return result - } - - if item.AttemptCount >= p.maxAttempts { - if dlErr := p.store.DeadLetter(ctx, item.ID, err.Error()); dlErr != nil { - result.Outcome = processOutcomeDeadLetterFailed - - p.l.Error(ctx, "failed to dead-letter supabase auth sync queue item", - append( - processResultFields(item, result, time.Now()), - zap.Int32("queue_item.max_attempts", p.maxAttempts), - zap.NamedError("processing_error", err), - zap.NamedError("dead_letter_error", dlErr), - )..., - ) - - return result - } - - result.Outcome = processOutcomeDeadLettered - p.l.Error(ctx, "dead-lettered supabase auth sync queue item after max attempts", - append( - processResultFields(item, result, time.Now()), - zap.Int32("queue_item.max_attempts", p.maxAttempts), - zap.NamedError("processing_error", err), - )..., - ) - - return result - } - - backoff := retryBackoff(item.AttemptCount) - result.Outcome = processOutcomeRetried - result.Backoff = backoff - - if retryErr := p.store.Retry(ctx, item.ID, backoff, err.Error()); retryErr != nil { - result.Outcome = processOutcomeRetryFailed - - p.l.Error(ctx, "failed to schedule supabase 
auth sync queue item retry", - append( - processResultFields(item, result, time.Now()), - zap.NamedError("processing_error", err), - zap.NamedError("retry_error", retryErr), - )..., - ) - - return result - } - - p.l.Warn(ctx, "retrying supabase auth sync queue item after processing error", - append( - processResultFields(item, result, time.Now()), - zap.NamedError("processing_error", err), - )..., - ) - - return result -} - -func (p *Processor) processOnce(ctx context.Context, item QueueItem) (action reconcileAction, err error) { - defer func() { - if recovered := recover(); recovered != nil { - p.l.Error(ctx, "panic while processing supabase auth sync queue item", - append( - queueItemFields(item, time.Now()), - zap.String("worker.panic", fmt.Sprint(recovered)), - zap.String("worker.stack", string(debug.Stack())), - )..., - ) - - err = fmt.Errorf("panic while processing queue item: %v", recovered) - } - }() - - return p.reconcile(ctx, item) -} - -func (p *Processor) reconcile(ctx context.Context, item QueueItem) (reconcileAction, error) { - if item.Operation == "delete" { - if err := p.store.DeletePublicUser(ctx, item.UserID); err != nil { - return "", fmt.Errorf("delete public.users %s: %w", item.UserID, err) - } - - return reconcileActionDeletePublicUser, nil - } - - authUser, err := p.store.GetAuthUser(ctx, item.UserID) - - if errors.Is(err, pgx.ErrNoRows) { - if delErr := p.store.DeletePublicUser(ctx, item.UserID); delErr != nil { - return "", fmt.Errorf("delete public.users %s: %w", item.UserID, delErr) - } - - return reconcileActionDeletePublicUser, nil - } - - if err != nil { - return "", fmt.Errorf("get auth.users %s: %w", item.UserID, err) - } - - if err = p.store.UpsertPublicUser(ctx, authUser.ID, authUser.Email); err != nil { - return "", fmt.Errorf("upsert public.users %s: %w", authUser.ID, err) - } - - return reconcileActionUpsertPublicUser, nil -} - -func retryBackoff(attempt int32) time.Duration { - switch { - case attempt <= 1: - return 5 * 
time.Second - case attempt <= 3: - return 30 * time.Second - case attempt <= 6: - return 2 * time.Minute - case attempt <= 10: - return 5 * time.Minute - default: - return 15 * time.Minute - } -} diff --git a/packages/dashboard-api/internal/supabaseauthusersync/processor_test.go b/packages/dashboard-api/internal/supabaseauthusersync/processor_test.go deleted file mode 100644 index 446653a3d0..0000000000 --- a/packages/dashboard-api/internal/supabaseauthusersync/processor_test.go +++ /dev/null @@ -1,137 +0,0 @@ -package supabaseauthusersync - -import ( - "context" - "testing" - "time" - - "github.com/google/uuid" - "github.com/stretchr/testify/require" - - "github.com/e2b-dev/infra/packages/shared/pkg/logger" -) - -type retryCall struct { - id int64 - backoff time.Duration - lastError string -} - -type deadLetterCall struct { - id int64 - lastError string -} - -type fakeProcessorStore struct { - getAuthUserFn func(context.Context, uuid.UUID) (*AuthUser, error) - - deletePublicUserCalls int - retryCalls []retryCall - deadLetterCalls []deadLetterCall -} - -func (s *fakeProcessorStore) Retry(_ context.Context, id int64, backoff time.Duration, lastError string) error { - s.retryCalls = append(s.retryCalls, retryCall{ - id: id, - backoff: backoff, - lastError: lastError, - }) - - return nil -} - -func (s *fakeProcessorStore) DeadLetter(_ context.Context, id int64, lastError string) error { - s.deadLetterCalls = append(s.deadLetterCalls, deadLetterCall{ - id: id, - lastError: lastError, - }) - - return nil -} - -func (s *fakeProcessorStore) GetAuthUser(ctx context.Context, userID uuid.UUID) (*AuthUser, error) { - return s.getAuthUserFn(ctx, userID) -} - -func (s *fakeProcessorStore) UpsertPublicUser(_ context.Context, _ uuid.UUID, _ string) error { - return nil -} - -func (s *fakeProcessorStore) DeletePublicUser(_ context.Context, _ uuid.UUID) error { - s.deletePublicUserCalls++ - - return nil -} - -func TestProcessorProcessRetriesRecoveredPanic(t *testing.T) { - 
t.Parallel() - - store := &fakeProcessorStore{ - getAuthUserFn: func(context.Context, uuid.UUID) (*AuthUser, error) { - panic("boom") - }, - } - processor := NewProcessor(store, 3, logger.NewNopLogger()) - item := QueueItem{ - ID: 1, - UserID: uuid.New(), - AttemptCount: 1, - } - - require.NotPanics(t, func() { - processor.process(context.Background(), item) - }) - require.Len(t, store.retryCalls, 1) - require.Contains(t, store.retryCalls[0].lastError, "panic while processing queue item") - require.Empty(t, store.deadLetterCalls) -} - -func TestProcessorProcessDeadLettersRecoveredPanicAtMaxAttempts(t *testing.T) { - t.Parallel() - - store := &fakeProcessorStore{ - getAuthUserFn: func(context.Context, uuid.UUID) (*AuthUser, error) { - panic("boom") - }, - } - processor := NewProcessor(store, 3, logger.NewNopLogger()) - item := QueueItem{ - ID: 1, - UserID: uuid.New(), - AttemptCount: 3, - } - - require.NotPanics(t, func() { - processor.process(context.Background(), item) - }) - require.Empty(t, store.retryCalls) - require.Len(t, store.deadLetterCalls, 1) - require.Contains(t, store.deadLetterCalls[0].lastError, "panic while processing queue item") -} - -func TestProcessorProcessDeleteSkipsAuthLookup(t *testing.T) { - t.Parallel() - - getAuthUserCalled := false - store := &fakeProcessorStore{ - getAuthUserFn: func(context.Context, uuid.UUID) (*AuthUser, error) { - getAuthUserCalled = true - - return nil, nil - }, - } - processor := NewProcessor(store, 3, logger.NewNopLogger()) - item := QueueItem{ - ID: 1, - UserID: uuid.New(), - Operation: "delete", - AttemptCount: 1, - } - - result := processor.process(context.Background(), item) - - require.False(t, getAuthUserCalled) - require.Equal(t, 1, store.deletePublicUserCalls) - require.Equal(t, processOutcomeReadyToAck, result.Outcome) - require.Equal(t, reconcileActionDeletePublicUser, result.Action) -} diff --git a/packages/dashboard-api/internal/supabaseauthusersync/runner.go 
b/packages/dashboard-api/internal/supabaseauthusersync/runner.go deleted file mode 100644 index 37d014bcc6..0000000000 --- a/packages/dashboard-api/internal/supabaseauthusersync/runner.go +++ /dev/null @@ -1,162 +0,0 @@ -package supabaseauthusersync - -import ( - "context" - "time" - - "go.uber.org/zap" - - sqlcdb "github.com/e2b-dev/infra/packages/db/client" - authdb "github.com/e2b-dev/infra/packages/db/pkg/auth" - "github.com/e2b-dev/infra/packages/shared/pkg/logger" -) - -type runnerStore interface { - ClaimBatch(ctx context.Context, lockOwner string, lockTimeout time.Duration, batchSize int32) ([]QueueItem, error) - AckBatch(ctx context.Context, ids []int64) error -} - -type workerStore interface { - runnerStore - processorStore -} - -type Runner struct { - cfg Config - store runnerStore - processor *Processor - lockOwner string - l logger.Logger -} - -type ackCandidate struct { - item QueueItem - result processResult -} - -func NewRunner(cfg Config, authDB *authdb.Client, mainDB *sqlcdb.Client, lockOwner string, l logger.Logger) *Runner { - workerLogger := l.With(logger.WithServiceInstanceID(lockOwner)) - store := NewStore(authDB, mainDB) - - return &Runner{ - cfg: cfg, - store: store, - processor: NewProcessor(store, cfg.MaxAttempts, workerLogger), - lockOwner: lockOwner, - l: workerLogger, - } -} - -func (r *Runner) Run(ctx context.Context) error { - r.l.Info(ctx, "starting supabase auth user sync worker", - zap.String("worker.lock_owner", r.lockOwner), - zap.Duration("worker.poll_interval", r.cfg.PollInterval), - zap.Int32("worker.batch_size", r.cfg.BatchSize), - zap.Duration("worker.lock_timeout", r.cfg.LockTimeout), - zap.Int32("worker.max_attempts", r.cfg.MaxAttempts), - ) - - for { - r.drain(ctx) - if ctx.Err() != nil { - r.l.Info(ctx, "stopping supabase auth user sync worker", zap.Error(ctx.Err())) - - return ctx.Err() - } - - timer := time.NewTimer(r.cfg.PollInterval) - select { - case <-ctx.Done(): - if !timer.Stop() { - <-timer.C - } - - 
r.l.Info(ctx, "stopping supabase auth user sync worker", zap.Error(ctx.Err())) - - return ctx.Err() - case <-timer.C: - } - } -} - -func (r *Runner) drain(ctx context.Context) { - for { - processed := r.pollOnce(ctx) - if processed == 0 { - return - } - } -} - -func (r *Runner) pollOnce(ctx context.Context) int { - claimedAt := time.Now() - items, err := r.store.ClaimBatch(ctx, r.lockOwner, r.cfg.LockTimeout, r.cfg.BatchSize) - if err != nil { - r.l.Error(ctx, "failed to claim supabase auth sync queue batch", - zap.String("worker.lock_owner", r.lockOwner), - zap.Duration("worker.lock_timeout", r.cfg.LockTimeout), - zap.Int32("worker.batch_size", r.cfg.BatchSize), - zap.Error(err), - ) - - return 0 - } - - if len(items) == 0 { - return 0 - } - - summary := newBatchSummary(items, claimedAt) - ackCandidates := make([]ackCandidate, 0, len(items)) - - for _, item := range items { - result := r.processor.process(ctx, item) - if result.Outcome == processOutcomeReadyToAck { - ackCandidates = append(ackCandidates, ackCandidate{ - item: item, - result: result, - }) - - continue - } - - summary.Add(result) - } - - if len(ackCandidates) > 0 { - r.finalizeAcks(ctx, ackCandidates, &summary) - } - - r.l.Log(ctx, summary.Level(), "processed supabase auth sync queue batch", summary.Fields(time.Since(claimedAt))...) 
- - return len(items) -} - -func (r *Runner) finalizeAcks(ctx context.Context, candidates []ackCandidate, summary *batchSummary) { - ids := make([]int64, 0, len(candidates)) - for _, candidate := range candidates { - ids = append(ids, candidate.item.ID) - } - - if err := r.store.AckBatch(ctx, ids); err != nil { - for _, candidate := range candidates { - candidate.result.Outcome = processOutcomeAckFailed - summary.Add(candidate.result) - - r.l.Error(ctx, "processed supabase auth sync queue item but failed to ack", - append( - processResultFields(candidate.item, candidate.result, time.Now()), - zap.NamedError("ack_error", err), - )..., - ) - } - - return - } - - for _, candidate := range candidates { - candidate.result.Outcome = processOutcomeAcked - summary.Add(candidate.result) - r.l.Info(ctx, "processed supabase auth sync queue item", processResultFields(candidate.item, candidate.result, time.Now())...) - } -} diff --git a/packages/dashboard-api/internal/supabaseauthusersync/runner_test.go b/packages/dashboard-api/internal/supabaseauthusersync/runner_test.go deleted file mode 100644 index 25d2327af1..0000000000 --- a/packages/dashboard-api/internal/supabaseauthusersync/runner_test.go +++ /dev/null @@ -1,494 +0,0 @@ -package supabaseauthusersync - -import ( - "context" - "fmt" - "sync" - "testing" - "time" - - "github.com/google/uuid" - "github.com/jackc/pgx/v5" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/e2b-dev/infra/packages/db/pkg/testutils" - "github.com/e2b-dev/infra/packages/shared/pkg/logger" -) - -const ( - testRunnerPollInterval = 20 * time.Millisecond - testRunnerLockTimeout = 150 * time.Millisecond - testEventuallyTimeout = 8 * time.Second - testEventuallyTick = 25 * time.Millisecond - testRunnerStopTimeout = 2 * time.Second -) - -type runnerProcess struct { - cancel context.CancelFunc - done chan error - stopOnce sync.Once -} - -type userExpectation struct { - Email string - Exists bool -} - -type 
queueSnapshot struct { - Total int - DeadLettered int -} - -func TestSupabaseAuthUserSyncRunner_EndToEnd(t *testing.T) { - t.Parallel() - - db := testutils.SetupDatabase(t) - db.ApplyMigrations(t, "packages/db/pkg/auth/migrations") - - runRepairsInsertUpdateDeleteDrift(t, db) - runReclaimsStaleQueueLocks(t, db) - runDrainsBurstBacklogWithMultipleRunners(t, db) -} - -func runRepairsInsertUpdateDeleteDrift(t *testing.T, db *testutils.Database) { - t.Helper() - - ctx := t.Context() - userID := uuid.New() - initialEmail := fmt.Sprintf("auth-sync-%s-initial@example.com", userID.String()[:8]) - updatedEmail := fmt.Sprintf("auth-sync-%s-updated@example.com", userID.String()[:8]) - - insertAuthUser(t, ctx, db, userID, initialEmail) - deletePublicUser(t, ctx, db, userID) - assertQueueBacklog(t, ctx, db, 1) - - insertRunner := startRunnerProcess(t, db, newTestRunnerConfig(4), "repair-insert") - t.Cleanup(func() { - insertRunner.Stop(t) - }) - waitForPublicUsers(t, ctx, db, map[uuid.UUID]userExpectation{ - userID: { - Email: initialEmail, - Exists: true, - }, - }) - waitForQueueDrain(t, ctx, db) - insertRunner.Stop(t) - - updateAuthUserEmail(t, ctx, db, userID, updatedEmail) - setPublicUserEmail(t, ctx, db, userID, "stale@example.com") - assertQueueBacklog(t, ctx, db, 1) - - updateRunner := startRunnerProcess(t, db, newTestRunnerConfig(4), "repair-update") - t.Cleanup(func() { - updateRunner.Stop(t) - }) - waitForPublicUsers(t, ctx, db, map[uuid.UUID]userExpectation{ - userID: { - Email: updatedEmail, - Exists: true, - }, - }) - waitForQueueDrain(t, ctx, db) - updateRunner.Stop(t) - - deleteAuthUser(t, ctx, db, userID) - insertPublicUser(t, ctx, db, userID, "ghost@example.com") - assertQueueBacklog(t, ctx, db, 1) - - deleteRunner := startRunnerProcess(t, db, newTestRunnerConfig(4), "repair-delete") - t.Cleanup(func() { - deleteRunner.Stop(t) - }) - waitForPublicUsers(t, ctx, db, map[uuid.UUID]userExpectation{ - userID: { - Exists: false, - }, - }) - waitForQueueDrain(t, ctx, 
db) - deleteRunner.Stop(t) -} - -func runReclaimsStaleQueueLocks(t *testing.T, db *testutils.Database) { - t.Helper() - - ctx := t.Context() - userID := uuid.New() - email := fmt.Sprintf("auth-sync-%s-locked@example.com", userID.String()[:8]) - - insertAuthUser(t, ctx, db, userID, email) - deletePublicUser(t, ctx, db, userID) - lockQueueItems(t, ctx, db, userID, time.Now().Add(-time.Minute), "stale-worker") - assertQueueBacklog(t, ctx, db, 1) - - runner := startRunnerProcess(t, db, newTestRunnerConfig(2), "lock-reclaimer") - t.Cleanup(func() { - runner.Stop(t) - }) - - waitForPublicUsers(t, ctx, db, map[uuid.UUID]userExpectation{ - userID: { - Email: email, - Exists: true, - }, - }) - waitForQueueDrain(t, ctx, db) - runner.Stop(t) -} - -func runDrainsBurstBacklogWithMultipleRunners(t *testing.T, db *testutils.Database) { - t.Helper() - - ctx := t.Context() - const userCount = 60 - - userIDs := make([]uuid.UUID, 0, userCount) - - for i := range userCount { - userID := uuid.New() - userIDs = append(userIDs, userID) - - initialEmail := fmt.Sprintf("auth-sync-burst-%02d-initial@example.com", i) - insertAuthUser(t, ctx, db, userID, initialEmail) - - if i%2 == 0 { - updateAuthUserEmail(t, ctx, db, userID, fmt.Sprintf("auth-sync-burst-%02d-v2@example.com", i)) - } - if i%5 == 0 { - updateAuthUserEmail(t, ctx, db, userID, fmt.Sprintf("auth-sync-burst-%02d-v3@example.com", i)) - } - - if i%3 == 0 { - deleteAuthUser(t, ctx, db, userID) - enqueueUserSyncItem(t, ctx, db, userID, "delete") - if i%6 == 0 { - insertPublicUser(t, ctx, db, userID, fmt.Sprintf("ghost-%02d@example.com", i)) - } - - continue - } - - if i%8 == 0 { - deletePublicUser(t, ctx, db, userID) - } else if i%7 == 0 { - setPublicUserEmail(t, ctx, db, userID, fmt.Sprintf("stale-%02d@example.com", i)) - } - - if i%4 == 0 { - enqueueUserSyncItem(t, ctx, db, userID, "upsert") - } - if i%9 == 0 { - enqueueUserSyncItem(t, ctx, db, userID, "upsert") - } - } - - authUsers, err := loadAuthUsers(ctx, db) - 
require.NoError(t, err) - - want := expectedUsersForIDs(userIDs, authUsers) - assertQueueBacklog(t, ctx, db, userCount) - - runnerA := startRunnerProcess(t, db, newTestRunnerConfig(5), "burst-a") - runnerB := startRunnerProcess(t, db, newTestRunnerConfig(5), "burst-b") - t.Cleanup(func() { - runnerA.Stop(t) - runnerB.Stop(t) - }) - - waitForPublicUsers(t, ctx, db, want) - waitForQueueDrain(t, ctx, db) - - runnerA.Stop(t) - runnerB.Stop(t) -} - -func newTestRunnerConfig(batchSize int32) Config { - cfg := DefaultConfig() - cfg.Enabled = true - cfg.BatchSize = batchSize - cfg.PollInterval = testRunnerPollInterval - cfg.LockTimeout = testRunnerLockTimeout - cfg.MaxAttempts = 5 - - return cfg -} - -func startRunnerProcess(t *testing.T, db *testutils.Database, cfg Config, lockOwner string) *runnerProcess { - t.Helper() - - ctx, cancel := context.WithCancel(context.Background()) - done := make(chan error, 1) - runner := NewRunner( - cfg, - db.AuthDb, - db.SqlcClient, - lockOwner, - logger.NewNopLogger(), - ) - - go func() { - done <- runner.Run(ctx) - }() - - return &runnerProcess{ - cancel: cancel, - done: done, - } -} - -func (p *runnerProcess) Stop(t *testing.T) { - t.Helper() - - p.stopOnce.Do(func() { - p.cancel() - - select { - case err := <-p.done: - require.ErrorIs(t, err, context.Canceled) - case <-time.After(testRunnerStopTimeout): - t.Fatalf("runner did not stop within %s", testRunnerStopTimeout) - } - }) -} - -func insertAuthUser(t *testing.T, ctx context.Context, db *testutils.Database, userID uuid.UUID, email string) { - t.Helper() - - err := db.AuthDb.TestsRawSQL(ctx, - "INSERT INTO auth.users (id, email) VALUES ($1, $2)", - userID, - email, - ) - require.NoError(t, err) -} - -func updateAuthUserEmail(t *testing.T, ctx context.Context, db *testutils.Database, userID uuid.UUID, email string) { - t.Helper() - - err := db.AuthDb.TestsRawSQL(ctx, - "UPDATE auth.users SET email = $1 WHERE id = $2", - email, - userID, - ) - require.NoError(t, err) -} - -func 
deleteAuthUser(t *testing.T, ctx context.Context, db *testutils.Database, userID uuid.UUID) { - t.Helper() - - err := db.AuthDb.TestsRawSQL(ctx, - "DELETE FROM auth.users WHERE id = $1", - userID, - ) - require.NoError(t, err) -} - -func deletePublicUser(t *testing.T, ctx context.Context, db *testutils.Database, userID uuid.UUID) { - t.Helper() - - err := db.AuthDb.TestsRawSQL(ctx, - "DELETE FROM public.users WHERE id = $1", - userID, - ) - require.NoError(t, err) -} - -func insertPublicUser(t *testing.T, ctx context.Context, db *testutils.Database, userID uuid.UUID, email string) { - t.Helper() - - err := db.AuthDb.TestsRawSQL(ctx, ` -INSERT INTO public.users (id, email) -VALUES ($1, $2) -ON CONFLICT (id) DO UPDATE -SET email = EXCLUDED.email, - updated_at = now() -`, - userID, - email, - ) - require.NoError(t, err) -} - -func setPublicUserEmail(t *testing.T, ctx context.Context, db *testutils.Database, userID uuid.UUID, email string) { - t.Helper() - - err := db.AuthDb.TestsRawSQL(ctx, - "UPDATE public.users SET email = $1, updated_at = now() WHERE id = $2", - email, - userID, - ) - require.NoError(t, err) -} - -func enqueueUserSyncItem(t *testing.T, ctx context.Context, db *testutils.Database, userID uuid.UUID, operation string) { - t.Helper() - - err := db.AuthDb.TestsRawSQL(ctx, - "INSERT INTO public.user_sync_queue (user_id, operation) VALUES ($1, $2)", - userID, - operation, - ) - require.NoError(t, err) -} - -func lockQueueItems(t *testing.T, ctx context.Context, db *testutils.Database, userID uuid.UUID, lockedAt time.Time, lockOwner string) { - t.Helper() - - err := db.AuthDb.TestsRawSQL(ctx, ` -UPDATE public.user_sync_queue -SET locked_at = $2, - lock_owner = $3 -WHERE user_id = $1 -`, - userID, - lockedAt, - lockOwner, - ) - require.NoError(t, err) -} - -func loadPublicUsers(ctx context.Context, db *testutils.Database) (map[uuid.UUID]string, error) { - users := make(map[uuid.UUID]string) - - err := db.AuthDb.TestsRawSQLQuery(ctx, - "SELECT id, email FROM 
public.users", - func(rows pgx.Rows) error { - for rows.Next() { - var userID uuid.UUID - var email string - if err := rows.Scan(&userID, &email); err != nil { - return err - } - - users[userID] = email - } - - return rows.Err() - }, - ) - if err != nil { - return nil, err - } - - return users, nil -} - -func loadAuthUsers(ctx context.Context, db *testutils.Database) (map[uuid.UUID]string, error) { - users := make(map[uuid.UUID]string) - - err := db.AuthDb.TestsRawSQLQuery(ctx, - "SELECT id, email FROM auth.users", - func(rows pgx.Rows) error { - for rows.Next() { - var userID uuid.UUID - var email string - if err := rows.Scan(&userID, &email); err != nil { - return err - } - - users[userID] = email - } - - return rows.Err() - }, - ) - if err != nil { - return nil, err - } - - return users, nil -} - -func loadQueueSnapshot(ctx context.Context, db *testutils.Database) (queueSnapshot, error) { - var snapshot queueSnapshot - - err := db.AuthDb.TestsRawSQLQuery(ctx, ` -SELECT - count(*)::int AS total, - count(*) FILTER (WHERE dead_lettered_at IS NOT NULL)::int AS dead_lettered -FROM public.user_sync_queue -`, - func(rows pgx.Rows) error { - if !rows.Next() { - return nil - } - - return rows.Scan(&snapshot.Total, &snapshot.DeadLettered) - }, - ) - if err != nil { - return queueSnapshot{}, err - } - - return snapshot, nil -} - -func expectedUsersForIDs(userIDs []uuid.UUID, authUsers map[uuid.UUID]string) map[uuid.UUID]userExpectation { - want := make(map[uuid.UUID]userExpectation, len(userIDs)) - - for _, userID := range userIDs { - email, ok := authUsers[userID] - want[userID] = userExpectation{ - Email: email, - Exists: ok, - } - } - - return want -} - -func assertQueueBacklog(t *testing.T, ctx context.Context, db *testutils.Database, minimum int) { - t.Helper() - - snapshot, err := loadQueueSnapshot(ctx, db) - require.NoError(t, err) - require.GreaterOrEqual(t, snapshot.Total, minimum) -} - -func waitForQueueDrain(t *testing.T, ctx context.Context, db 
*testutils.Database) { - t.Helper() - - require.EventuallyWithT(t, func(c *assert.CollectT) { - snapshot, err := loadQueueSnapshot(ctx, db) - if !assert.NoError(c, err) { - return - } - - assert.Equal(c, 0, snapshot.Total) - assert.Equal(c, 0, snapshot.DeadLettered) - }, testEventuallyTimeout, testEventuallyTick) -} - -func waitForPublicUsers(t *testing.T, ctx context.Context, db *testutils.Database, want map[uuid.UUID]userExpectation) { - t.Helper() - - require.EventuallyWithT(t, func(c *assert.CollectT) { - got, err := loadPublicUsers(ctx, db) - if !assert.NoError(c, err) { - return - } - - var gotExisting int - var wantExisting int - - for userID, expectation := range want { - email, ok := got[userID] - if ok { - gotExisting++ - } - if expectation.Exists { - wantExisting++ - } - - if !assert.Equalf(c, expectation.Exists, ok, "public.users presence for %s", userID) { - continue - } - if expectation.Exists { - assert.Equalf(c, expectation.Email, email, "public.users email for %s", userID) - } - } - - assert.Equal(c, wantExisting, gotExisting) - }, testEventuallyTimeout, testEventuallyTick) -} diff --git a/packages/dashboard-api/internal/supabaseauthusersync/store.go b/packages/dashboard-api/internal/supabaseauthusersync/store.go deleted file mode 100644 index 63ebc988d0..0000000000 --- a/packages/dashboard-api/internal/supabaseauthusersync/store.go +++ /dev/null @@ -1,115 +0,0 @@ -package supabaseauthusersync - -import ( - "context" - "time" - - "github.com/google/uuid" - "github.com/jackc/pgx/v5/pgtype" - - sqlcdb "github.com/e2b-dev/infra/packages/db/client" - authdb "github.com/e2b-dev/infra/packages/db/pkg/auth" - authqueries "github.com/e2b-dev/infra/packages/db/pkg/auth/queries" - "github.com/e2b-dev/infra/packages/db/queries" -) - -type QueueItem struct { - ID int64 - UserID uuid.UUID - Operation string - CreatedAt time.Time - AttemptCount int32 -} - -type AuthUser struct { - ID uuid.UUID - Email string -} - -type Store struct { - authQueries 
*authqueries.Queries - mainQueries *queries.Queries -} - -var _ workerStore = (*Store)(nil) - -func NewStore(authDB *authdb.Client, mainDB *sqlcdb.Client) *Store { - return &Store{ - authQueries: authDB.Write, - mainQueries: mainDB.Queries, - } -} - -func (s *Store) ClaimBatch(ctx context.Context, lockOwner string, lockTimeout time.Duration, batchSize int32) ([]QueueItem, error) { - rows, err := s.authQueries.ClaimUserSyncQueueBatch(ctx, authqueries.ClaimUserSyncQueueBatchParams{ - LockOwner: lockOwner, - LockTimeout: durationToInterval(lockTimeout), - BatchSize: batchSize, - }) - if err != nil { - return nil, err - } - - items := make([]QueueItem, len(rows)) - for i, r := range rows { - items[i] = QueueItem{ - ID: r.ID, - UserID: r.UserID, - Operation: r.Operation, - CreatedAt: r.CreatedAt, - AttemptCount: r.AttemptCount, - } - } - - return items, nil -} - -func (s *Store) AckBatch(ctx context.Context, ids []int64) error { - if len(ids) == 0 { - return nil - } - - return s.authQueries.AckUserSyncQueueItems(ctx, ids) -} - -func (s *Store) Retry(ctx context.Context, id int64, backoff time.Duration, lastError string) error { - return s.authQueries.RetryUserSyncQueueItem(ctx, authqueries.RetryUserSyncQueueItemParams{ - ID: id, - Backoff: durationToInterval(backoff), - LastError: lastError, - }) -} - -func (s *Store) DeadLetter(ctx context.Context, id int64, lastError string) error { - return s.authQueries.DeadLetterUserSyncQueueItem(ctx, authqueries.DeadLetterUserSyncQueueItemParams{ - ID: id, - LastError: lastError, - }) -} - -func (s *Store) GetAuthUser(ctx context.Context, userID uuid.UUID) (*AuthUser, error) { - row, err := s.authQueries.GetAuthUserByID(ctx, userID) - if err != nil { - return nil, err - } - - return &AuthUser{ID: row.ID, Email: row.Email}, nil -} - -func (s *Store) UpsertPublicUser(ctx context.Context, id uuid.UUID, email string) error { - return s.mainQueries.UpsertPublicUser(ctx, queries.UpsertPublicUserParams{ - ID: id, - Email: email, - }) -} 
- -func (s *Store) DeletePublicUser(ctx context.Context, id uuid.UUID) error { - return s.mainQueries.DeletePublicUser(ctx, id) -} - -func durationToInterval(d time.Duration) pgtype.Interval { - return pgtype.Interval{ - Microseconds: d.Microseconds(), - Valid: true, - } -} diff --git a/packages/dashboard-api/internal/supabaseauthusersync/supervisor.go b/packages/dashboard-api/internal/supabaseauthusersync/supervisor.go deleted file mode 100644 index de60dda43a..0000000000 --- a/packages/dashboard-api/internal/supabaseauthusersync/supervisor.go +++ /dev/null @@ -1,117 +0,0 @@ -package supabaseauthusersync - -import ( - "context" - "errors" - "fmt" - "runtime/debug" - "time" - - "go.uber.org/zap" - - "github.com/e2b-dev/infra/packages/shared/pkg/logger" -) - -const ( - defaultRestartDelay = time.Second - maxRestartDelay = 30 * time.Second - healthyRunResetThreshold = time.Minute -) - -type supervisorConfig struct { - RestartDelay time.Duration - MaxRestartDelay time.Duration - HealthyRunResetAfter time.Duration -} - -func defaultSupervisorConfig() supervisorConfig { - return supervisorConfig{ - RestartDelay: defaultRestartDelay, - MaxRestartDelay: maxRestartDelay, - HealthyRunResetAfter: healthyRunResetThreshold, - } -} - -func (r *Runner) RunWithRestart(ctx context.Context) error { - return supervise(ctx, r.l, defaultSupervisorConfig(), r.Run) -} - -func supervise(ctx context.Context, l logger.Logger, cfg supervisorConfig, run func(context.Context) error) error { - restartAttempt := 0 - - for { - startedAt := time.Now() - err := runRecovering(ctx, l, run) - runtime := time.Since(startedAt) - - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - return err - } - if ctx.Err() != nil { - return ctx.Err() - } - - if runtime >= cfg.HealthyRunResetAfter { - restartAttempt = 0 - } - restartAttempt++ - - delay := restartBackoff(restartAttempt, cfg.RestartDelay, cfg.MaxRestartDelay) - l.Error(ctx, "supabase auth user sync worker exited 
unexpectedly; restarting", - zap.Error(err), - zap.Int("worker.restart_attempt", restartAttempt), - zap.Duration("worker.restart_in", delay), - zap.Duration("worker.runtime", runtime), - zap.Duration("worker.healthy_run_reset_after", cfg.HealthyRunResetAfter), - ) - - timer := time.NewTimer(delay) - select { - case <-ctx.Done(): - timer.Stop() - - return ctx.Err() - case <-timer.C: - } - } -} - -func runRecovering(ctx context.Context, l logger.Logger, run func(context.Context) error) (err error) { - defer func() { - if recovered := recover(); recovered != nil { - l.Error(ctx, "supabase auth user sync worker panicked", - zap.String("worker.panic", fmt.Sprint(recovered)), - zap.String("worker.stack", string(debug.Stack())), - ) - - err = fmt.Errorf("worker panic: %v", recovered) - } - }() - - err = run(ctx) - if err == nil && ctx.Err() == nil { - return errors.New("worker exited without error") - } - - return err -} - -func restartBackoff(attempt int, base time.Duration, maxDelay time.Duration) time.Duration { - if base <= 0 { - base = defaultRestartDelay - } - if maxDelay < base { - maxDelay = base - } - - delay := base - for i := 1; i < attempt; i++ { - if delay >= maxDelay/2 { - return maxDelay - } - - delay *= 2 - } - - return delay -} diff --git a/packages/dashboard-api/internal/supabaseauthusersync/supervisor_test.go b/packages/dashboard-api/internal/supabaseauthusersync/supervisor_test.go deleted file mode 100644 index fc53f77ba4..0000000000 --- a/packages/dashboard-api/internal/supabaseauthusersync/supervisor_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package supabaseauthusersync - -import ( - "context" - "errors" - "sync/atomic" - "testing" - "time" - - "github.com/stretchr/testify/require" - - "github.com/e2b-dev/infra/packages/shared/pkg/logger" -) - -func TestSuperviseRestartsAfterUnexpectedError(t *testing.T) { - t.Parallel() - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - var runs atomic.Int32 - errCh := make(chan error, 1) 
- - go func() { - errCh <- supervise(ctx, logger.NewNopLogger(), supervisorConfig{ - RestartDelay: time.Millisecond, - MaxRestartDelay: time.Millisecond, - HealthyRunResetAfter: time.Hour, - }, func(ctx context.Context) error { - attempt := runs.Add(1) - if attempt < 3 { - return errors.New("boom") - } - - cancel() - <-ctx.Done() - - return ctx.Err() - }) - }() - - err := <-errCh - require.ErrorIs(t, err, context.Canceled) - require.Equal(t, int32(3), runs.Load()) -} - -func TestSuperviseRestartsAfterPanic(t *testing.T) { - t.Parallel() - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - var runs atomic.Int32 - errCh := make(chan error, 1) - - go func() { - errCh <- supervise(ctx, logger.NewNopLogger(), supervisorConfig{ - RestartDelay: time.Millisecond, - MaxRestartDelay: time.Millisecond, - HealthyRunResetAfter: time.Hour, - }, func(ctx context.Context) error { - attempt := runs.Add(1) - if attempt == 1 { - panic("boom") - } - - cancel() - <-ctx.Done() - - return ctx.Err() - }) - }() - - err := <-errCh - require.ErrorIs(t, err, context.Canceled) - require.Equal(t, int32(2), runs.Load()) -} diff --git a/packages/dashboard-api/main.go b/packages/dashboard-api/main.go index f1a75eae72..e6df2d9eb4 100644 --- a/packages/dashboard-api/main.go +++ b/packages/dashboard-api/main.go @@ -20,7 +20,9 @@ import ( "github.com/gin-contrib/cors" "github.com/gin-gonic/gin" "github.com/google/uuid" + "github.com/jackc/pgx/v5" middleware "github.com/oapi-codegen/gin-middleware" + "github.com/riverqueue/river" "go.uber.org/zap" "go.uber.org/zap/zapcore" @@ -28,9 +30,9 @@ import ( "github.com/e2b-dev/infra/packages/auth/pkg/types" clickhouse "github.com/e2b-dev/infra/packages/clickhouse/pkg" "github.com/e2b-dev/infra/packages/dashboard-api/internal/api" + "github.com/e2b-dev/infra/packages/dashboard-api/internal/backgroundworker" "github.com/e2b-dev/infra/packages/dashboard-api/internal/cfg" 
"github.com/e2b-dev/infra/packages/dashboard-api/internal/handlers" - "github.com/e2b-dev/infra/packages/dashboard-api/internal/supabaseauthusersync" sqlcdb "github.com/e2b-dev/infra/packages/db/client" authdb "github.com/e2b-dev/infra/packages/db/pkg/auth" "github.com/e2b-dev/infra/packages/db/pkg/pool" @@ -230,24 +232,29 @@ func run() int { wg := sync.WaitGroup{} + var riverClient *river.Client[pgx.Tx] + if config.SupabaseAuthUserSyncEnabled { workerLogger := l.With(zap.String("worker", "supabase_auth_user_sync")) - syncConfig := supabaseauthusersync.DefaultConfig() - syncConfig.Enabled = true - syncRunner := supabaseauthusersync.NewRunner( - syncConfig, - authDB, - db, - serviceInstanceID, - workerLogger, - ) - - wg.Go(func() { - if err := syncRunner.RunWithRestart(signalCtx); err != nil && !errors.Is(err, context.Canceled) { - l.Error(ctx, "supabase auth user sync worker error", zap.Error(err)) - errorCode.Add(1) - } - }) + + authPool := authDB.WritePool() + if err := backgroundworker.RunRiverMigrations(ctx, authPool); err != nil { + l.Fatal(ctx, "failed to run River migrations on auth DB", zap.Error(err)) + } + + workers := river.NewWorkers() + river.AddWorker(workers, backgroundworker.NewAuthUserSyncWorker(db, workerLogger)) + + riverClient, err = backgroundworker.NewRiverClient(authPool, workers) + if err != nil { + l.Fatal(ctx, "failed to create River client", zap.Error(err)) + } + + if err := riverClient.Start(signalCtx); err != nil { + l.Fatal(ctx, "failed to start River client", zap.Error(err)) + } + + l.Info(ctx, "background worker started (River auth_custom)", zap.String("queue", "auth_sync")) } wg.Go(func() { @@ -257,6 +264,14 @@ func run() int { shutdownCtx, shutdownCancel := context.WithTimeout(context.WithoutCancel(ctx), 30*time.Second) defer shutdownCancel() + if riverClient != nil { + if err := riverClient.Stop(shutdownCtx); err != nil { + l.Error(ctx, "River client shutdown error", zap.Error(err)) + + errorCode.Add(1) + } + } + if err := 
s.Shutdown(shutdownCtx); err != nil { l.Error(ctx, "HTTP server shutdown error", zap.Error(err)) diff --git a/packages/db/pkg/auth/client.go b/packages/db/pkg/auth/client.go index 02a1dcf2a0..fe9f33d666 100644 --- a/packages/db/pkg/auth/client.go +++ b/packages/db/pkg/auth/client.go @@ -60,6 +60,10 @@ func (db *Client) Close() error { return nil } +func (db *Client) WritePool() *pgxpool.Pool { + return db.writeConn +} + // WithTx runs the given function in a transaction. func (db *Client) WithTx(ctx context.Context) (*authqueries.Queries, pgx.Tx, error) { tx, err := db.writeConn.BeginTx(ctx, pgx.TxOptions{}) diff --git a/packages/db/pkg/auth/migrations/20260328000001_dashboard_supabase_auth_user_sync_queue.sql b/packages/db/pkg/auth/migrations/20260401000003_river_auth_user_sync_triggers.sql similarity index 66% rename from packages/db/pkg/auth/migrations/20260328000001_dashboard_supabase_auth_user_sync_queue.sql rename to packages/db/pkg/auth/migrations/20260401000003_river_auth_user_sync_triggers.sql index e300948803..3d41c6d733 100644 --- a/packages/db/pkg/auth/migrations/20260328000001_dashboard_supabase_auth_user_sync_queue.sql +++ b/packages/db/pkg/auth/migrations/20260401000003_river_auth_user_sync_triggers.sql @@ -1,71 +1,66 @@ -- +goose Up -- +goose StatementBegin -CREATE TABLE public.user_sync_queue ( - id BIGSERIAL PRIMARY KEY, - user_id UUID NOT NULL, - operation TEXT NOT NULL CHECK (operation IN ('upsert', 'delete')), - created_at TIMESTAMPTZ NOT NULL DEFAULT now(), - next_attempt_at TIMESTAMPTZ NOT NULL DEFAULT now(), - locked_at TIMESTAMPTZ NULL, - lock_owner TEXT NULL, - attempt_count INT NOT NULL DEFAULT 0, - last_error TEXT NULL, - dead_lettered_at TIMESTAMPTZ NULL -); - -ALTER TABLE public.user_sync_queue ENABLE ROW LEVEL SECURITY; - -CREATE INDEX auth_user_sync_queue_pending_idx - ON public.user_sync_queue (id) - WHERE dead_lettered_at IS NULL AND locked_at IS NULL; - -CREATE INDEX auth_user_sync_queue_user_idx - ON public.user_sync_queue 
(user_id); - -GRANT INSERT ON public.user_sync_queue TO trigger_user; -GRANT USAGE, SELECT ON SEQUENCE public.user_sync_queue_id_seq TO trigger_user; - -CREATE POLICY "Allow to create a user sync queue item" - ON public.user_sync_queue - AS PERMISSIVE - FOR INSERT - TO trigger_user - WITH CHECK (TRUE); + +CREATE SCHEMA IF NOT EXISTS auth_custom; CREATE OR REPLACE FUNCTION public.sync_insert_auth_users_to_public_users_trigger() RETURNS TRIGGER LANGUAGE plpgsql AS $func$ BEGIN - INSERT INTO public.user_sync_queue (user_id, operation) - VALUES (NEW.id, 'upsert'); + INSERT INTO auth_custom.river_job (args, kind, max_attempts, queue, state) + VALUES ( + jsonb_build_object('user_id', NEW.id, 'operation', 'upsert', 'email', NEW.email), + 'auth_user_sync', + 20, + 'auth_sync', + 'available' + ); + + PERFORM pg_notify('auth_custom.river_insert', '{"queue":"auth_sync"}'); RETURN NEW; END; -$func$ SECURITY DEFINER SET search_path = public; +$func$ SECURITY DEFINER SET search_path = public, auth_custom; CREATE OR REPLACE FUNCTION public.sync_update_auth_users_to_public_users_trigger() RETURNS TRIGGER LANGUAGE plpgsql AS $func$ BEGIN IF OLD.email IS DISTINCT FROM NEW.email THEN - INSERT INTO public.user_sync_queue (user_id, operation) - VALUES (NEW.id, 'upsert'); + INSERT INTO auth_custom.river_job (args, kind, max_attempts, queue, state) + VALUES ( + jsonb_build_object('user_id', NEW.id, 'operation', 'upsert', 'email', NEW.email), + 'auth_user_sync', + 20, + 'auth_sync', + 'available' + ); + + PERFORM pg_notify('auth_custom.river_insert', '{"queue":"auth_sync"}'); END IF; RETURN NEW; END; -$func$ SECURITY DEFINER SET search_path = public; +$func$ SECURITY DEFINER SET search_path = public, auth_custom; CREATE OR REPLACE FUNCTION public.sync_delete_auth_users_to_public_users_trigger() RETURNS TRIGGER LANGUAGE plpgsql AS $func$ BEGIN - INSERT INTO public.user_sync_queue (user_id, operation) - VALUES (OLD.id, 'delete'); + INSERT INTO auth_custom.river_job (args, kind, 
max_attempts, queue, state) + VALUES ( + jsonb_build_object('user_id', OLD.id, 'operation', 'delete'), + 'auth_user_sync', + 20, + 'auth_sync', + 'available' + ); + + PERFORM pg_notify('auth_custom.river_insert', '{"queue":"auth_sync"}'); RETURN OLD; END; -$func$ SECURITY DEFINER SET search_path = public; +$func$ SECURITY DEFINER SET search_path = public, auth_custom; ALTER FUNCTION public.sync_insert_auth_users_to_public_users_trigger() OWNER TO trigger_user; ALTER FUNCTION public.sync_update_auth_users_to_public_users_trigger() OWNER TO trigger_user; @@ -85,10 +80,16 @@ DROP TRIGGER IF EXISTS sync_deletes_to_public_users ON auth.users; CREATE TRIGGER sync_deletes_to_public_users AFTER DELETE ON auth.users FOR EACH ROW EXECUTE FUNCTION public.sync_delete_auth_users_to_public_users_trigger(); + +GRANT USAGE ON SCHEMA auth_custom TO trigger_user; +GRANT INSERT ON auth_custom.river_job TO trigger_user; +GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA auth_custom TO trigger_user; + -- +goose StatementEnd -- +goose Down -- +goose StatementBegin + DROP TRIGGER IF EXISTS sync_inserts_to_public_users ON auth.users; DROP TRIGGER IF EXISTS sync_updates_to_public_users ON auth.users; DROP TRIGGER IF EXISTS sync_deletes_to_public_users ON auth.users; @@ -97,8 +98,8 @@ CREATE OR REPLACE FUNCTION public.sync_insert_auth_users_to_public_users_trigger LANGUAGE plpgsql AS $func$ BEGIN - INSERT INTO public.users (id, email) - VALUES (NEW.id, NEW.email); + INSERT INTO public.user_sync_queue (user_id, operation) + VALUES (NEW.id, 'upsert'); RETURN NEW; END; @@ -108,13 +109,9 @@ CREATE OR REPLACE FUNCTION public.sync_update_auth_users_to_public_users_trigger LANGUAGE plpgsql AS $func$ BEGIN - UPDATE public.users - SET email = NEW.email, - updated_at = now() - WHERE id = NEW.id; - - IF NOT FOUND THEN - RAISE EXCEPTION 'User with id % does not exist in public.users', NEW.id; + IF OLD.email IS DISTINCT FROM NEW.email THEN + INSERT INTO public.user_sync_queue (user_id, operation) + VALUES 
(NEW.id, 'upsert'); END IF; RETURN NEW; @@ -125,7 +122,8 @@ CREATE OR REPLACE FUNCTION public.sync_delete_auth_users_to_public_users_trigger LANGUAGE plpgsql AS $func$ BEGIN - DELETE FROM public.users WHERE id = OLD.id; + INSERT INTO public.user_sync_queue (user_id, operation) + VALUES (OLD.id, 'delete'); RETURN OLD; END; @@ -147,10 +145,8 @@ CREATE TRIGGER sync_deletes_to_public_users AFTER DELETE ON auth.users FOR EACH ROW EXECUTE FUNCTION public.sync_delete_auth_users_to_public_users_trigger(); -REVOKE INSERT ON public.user_sync_queue FROM trigger_user; -REVOKE USAGE, SELECT ON SEQUENCE public.user_sync_queue_id_seq FROM trigger_user; +REVOKE ALL ON SCHEMA auth_custom FROM trigger_user; -DROP POLICY IF EXISTS "Allow to create a user sync queue item" ON public.user_sync_queue; +DROP SCHEMA IF EXISTS auth_custom CASCADE; -DROP TABLE public.user_sync_queue; -- +goose StatementEnd diff --git a/packages/db/pkg/auth/queries/models.go b/packages/db/pkg/auth/queries/models.go index e32050f8ad..de91154ef2 100644 --- a/packages/db/pkg/auth/queries/models.go +++ b/packages/db/pkg/auth/queries/models.go @@ -220,19 +220,6 @@ type User struct { Email string } -type UserSyncQueue struct { - ID int64 - UserID uuid.UUID - Operation string - CreatedAt time.Time - NextAttemptAt time.Time - LockedAt *time.Time - LockOwner *string - AttemptCount int32 - LastError *string - DeadLetteredAt *time.Time -} - type UsersTeam struct { ID int64 UserID uuid.UUID diff --git a/packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/ack_batch.sql b/packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/ack_batch.sql deleted file mode 100644 index 45dd7f6e49..0000000000 --- a/packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/ack_batch.sql +++ /dev/null @@ -1,3 +0,0 @@ --- name: AckUserSyncQueueItems :exec -DELETE FROM public.user_sync_queue -WHERE id = ANY(sqlc.arg(ids)::bigint[]); diff --git a/packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/claim_batch.sql 
b/packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/claim_batch.sql deleted file mode 100644 index 08f35e662d..0000000000 --- a/packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/claim_batch.sql +++ /dev/null @@ -1,17 +0,0 @@ --- name: ClaimUserSyncQueueBatch :many -UPDATE public.user_sync_queue -SET - locked_at = now(), - lock_owner = sqlc.arg(lock_owner)::text, - attempt_count = attempt_count + 1 -WHERE id IN ( - SELECT id - FROM public.user_sync_queue - WHERE dead_lettered_at IS NULL - AND next_attempt_at <= now() - AND (locked_at IS NULL OR locked_at < now() - sqlc.arg(lock_timeout)::interval) - ORDER BY id - FOR UPDATE SKIP LOCKED - LIMIT sqlc.arg(batch_size)::int -) -RETURNING id, user_id, operation, created_at, attempt_count; diff --git a/packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/dead_letter.sql b/packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/dead_letter.sql deleted file mode 100644 index a68d8cd363..0000000000 --- a/packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/dead_letter.sql +++ /dev/null @@ -1,8 +0,0 @@ --- name: DeadLetterUserSyncQueueItem :exec -UPDATE public.user_sync_queue -SET - locked_at = NULL, - lock_owner = NULL, - dead_lettered_at = now(), - last_error = sqlc.arg(last_error)::text -WHERE id = sqlc.arg(id)::bigint; diff --git a/packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/get_auth_user.sql b/packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/get_auth_user.sql deleted file mode 100644 index 414b3fe3cf..0000000000 --- a/packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/get_auth_user.sql +++ /dev/null @@ -1,4 +0,0 @@ --- name: GetAuthUserByID :one -SELECT id, email -FROM auth.users -WHERE id = sqlc.arg(user_id)::uuid; diff --git a/packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/retry.sql b/packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/retry.sql deleted file mode 100644 index 5386fdce5e..0000000000 --- 
a/packages/db/pkg/auth/sql_queries/supabase_auth_user_sync/retry.sql +++ /dev/null @@ -1,8 +0,0 @@ --- name: RetryUserSyncQueueItem :exec -UPDATE public.user_sync_queue -SET - locked_at = NULL, - lock_owner = NULL, - next_attempt_at = now() + sqlc.arg(backoff)::interval, - last_error = sqlc.arg(last_error)::text -WHERE id = sqlc.arg(id)::bigint; diff --git a/packages/db/pkg/testutils/db.go b/packages/db/pkg/testutils/db.go index 3b1f8accb5..a9fd66e4c4 100644 --- a/packages/db/pkg/testutils/db.go +++ b/packages/db/pkg/testutils/db.go @@ -118,6 +118,22 @@ func SetupDatabase(t *testing.T) *Database { func (db *Database) ApplyMigrations(t *testing.T, migrationDirs ...string) { t.Helper() + db.applyGooseMigrations(t, 0, migrationDirs...) +} + +func (db *Database) ApplyMigrationsUpTo(t *testing.T, version int64, migrationDirs ...string) { + t.Helper() + + db.applyGooseMigrations(t, version, migrationDirs...) +} + +func (db *Database) ConnStr() string { + return db.connStr +} + +func (db *Database) applyGooseMigrations(t *testing.T, upToVersion int64, migrationDirs ...string) { + t.Helper() + cmd := exec.CommandContext(t.Context(), "git", "rev-parse", "--show-toplevel") output, err := cmd.Output() require.NoError(t, err, "Failed to find git root") @@ -134,13 +150,23 @@ func (db *Database) ApplyMigrations(t *testing.T, migrationDirs ...string) { }) for _, migrationsDir := range migrationDirs { - err = goose.RunWithOptionsContext( - t.Context(), - "up", - sqlDB, - filepath.Join(repoRoot, migrationsDir), - nil, - ) + if upToVersion > 0 { + err = goose.UpToContext( + t.Context(), + sqlDB, + filepath.Join(repoRoot, migrationsDir), + upToVersion, + ) + } else { + err = goose.RunWithOptionsContext( + t.Context(), + "up", + sqlDB, + filepath.Join(repoRoot, migrationsDir), + nil, + ) + } + require.NoError(t, err) } } From 1113654a2f8ec475e6e459a825779adff6b33968 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 1 Apr 2026 20:38:50 +0000 Subject: [PATCH 16/20] chore: 
auto-commit generated changes --- packages/api/go.mod | 4 ++-- packages/api/go.sum | 6 ++---- packages/auth/go.mod | 2 +- packages/auth/go.sum | 3 +-- packages/clickhouse/go.mod | 4 ++-- packages/clickhouse/go.sum | 6 ++---- packages/client-proxy/go.mod | 2 +- packages/client-proxy/go.sum | 3 +-- packages/dashboard-api/go.mod | 8 ++++---- packages/dashboard-api/go.sum | 7 +++---- .../internal/backgroundworker/auth_user_sync.go | 2 +- packages/db/go.mod | 4 ++-- packages/db/go.sum | 6 ++---- packages/docker-reverse-proxy/go.mod | 4 ++-- packages/docker-reverse-proxy/go.sum | 6 ++---- packages/envd/go.mod | 2 +- packages/envd/go.sum | 3 +-- packages/local-dev/go.mod | 2 +- packages/local-dev/go.sum | 3 +-- packages/orchestrator/go.mod | 2 +- packages/orchestrator/go.sum | 3 +-- packages/shared/go.mod | 2 +- packages/shared/go.sum | 3 +-- tests/integration/go.mod | 4 ++-- tests/integration/go.sum | 6 ++---- 25 files changed, 40 insertions(+), 57 deletions(-) diff --git a/packages/api/go.mod b/packages/api/go.mod index 9d197cf26d..8a9763fe98 100644 --- a/packages/api/go.mod +++ b/packages/api/go.mod @@ -35,7 +35,7 @@ require ( github.com/google/uuid v1.6.0 github.com/grafana/loki/v3 v3.6.4 github.com/hashicorp/nomad/api v0.0.0-20251216171439-1dee0671280e - github.com/jackc/pgx/v5 v5.7.5 + github.com/jackc/pgx/v5 v5.9.1 github.com/launchdarkly/go-sdk-common/v3 v3.3.0 github.com/launchdarkly/go-server-sdk/v7 v7.13.0 github.com/oapi-codegen/gin-middleware v1.0.2 @@ -381,7 +381,7 @@ require ( golang.org/x/crypto v0.48.0 // indirect golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a // indirect golang.org/x/image v0.38.0 // indirect - golang.org/x/mod v0.33.0 // indirect + golang.org/x/mod v0.34.0 // indirect golang.org/x/oauth2 v0.34.0 // indirect golang.org/x/sys v0.41.0 // indirect golang.org/x/text v0.35.0 // indirect diff --git a/packages/api/go.sum b/packages/api/go.sum index 1d207bd808..5e33ec6518 100644 --- a/packages/api/go.sum +++ b/packages/api/go.sum @@ -557,8 
+557,7 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs= -github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= +github.com/jackc/pgx/v5 v5.9.1 h1:uwrxJXBnx76nyISkhr33kQLlUqjv7et7b9FjCen/tdc= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jaegertracing/jaeger-idl v0.5.0 h1:zFXR5NL3Utu7MhPg8ZorxtCBjHrL3ReM1VoB65FOFGE= @@ -1159,8 +1158,7 @@ golang.org/x/image v0.38.0/go.mod h1:/3f6vaXC+6CEanU4KJxbcUZyEePbyKbaLoDOe4ehFYY golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= -golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= +golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= diff --git a/packages/auth/go.mod b/packages/auth/go.mod index 27782f8e62..a545b30053 100644 --- a/packages/auth/go.mod +++ b/packages/auth/go.mod @@ -61,7 +61,7 @@ require ( 
github.com/jackc/pgerrcode v0.0.0-20250907135507-afb5586c32a6 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect - github.com/jackc/pgx/v5 v5.7.5 // indirect + github.com/jackc/pgx/v5 v5.9.1 // indirect github.com/jackc/puddle/v2 v2.2.2 // indirect github.com/jellydator/ttlcache/v3 v3.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect diff --git a/packages/auth/go.sum b/packages/auth/go.sum index 4b463c263c..6d03e5cbdb 100644 --- a/packages/auth/go.sum +++ b/packages/auth/go.sum @@ -114,8 +114,7 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs= -github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= +github.com/jackc/pgx/v5 v5.9.1 h1:uwrxJXBnx76nyISkhr33kQLlUqjv7et7b9FjCen/tdc= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jellydator/ttlcache/v3 v3.4.0 h1:YS4P125qQS0tNhtL6aeYkheEaB/m8HCqdMMP4mnWdTY= diff --git a/packages/clickhouse/go.mod b/packages/clickhouse/go.mod index e7f4c03e67..2b151949d7 100644 --- a/packages/clickhouse/go.mod +++ b/packages/clickhouse/go.mod @@ -39,7 +39,7 @@ require ( github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect - github.com/jackc/pgx/v5 v5.7.5 // indirect + github.com/jackc/pgx/v5 v5.9.1 // indirect 
github.com/jackc/puddle/v2 v2.2.2 // indirect github.com/joho/godotenv v1.5.1 // indirect github.com/jonboulle/clockwork v0.5.0 // indirect @@ -93,7 +93,7 @@ require ( go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.48.0 // indirect golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a // indirect - golang.org/x/mod v0.33.0 // indirect + golang.org/x/mod v0.34.0 // indirect golang.org/x/net v0.50.0 // indirect golang.org/x/sync v0.20.0 // indirect golang.org/x/sys v0.41.0 // indirect diff --git a/packages/clickhouse/go.sum b/packages/clickhouse/go.sum index af8e90ffb7..88ae0b3a51 100644 --- a/packages/clickhouse/go.sum +++ b/packages/clickhouse/go.sum @@ -128,8 +128,7 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs= -github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= +github.com/jackc/pgx/v5 v5.9.1 h1:uwrxJXBnx76nyISkhr33kQLlUqjv7et7b9FjCen/tdc= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -310,8 +309,7 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod 
v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= -golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= +golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= diff --git a/packages/client-proxy/go.mod b/packages/client-proxy/go.mod index 04bad15cce..0b041d6463 100644 --- a/packages/client-proxy/go.mod +++ b/packages/client-proxy/go.mod @@ -63,7 +63,7 @@ require ( go.opentelemetry.io/proto/otlp v1.9.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a // indirect - golang.org/x/mod v0.33.0 // indirect + golang.org/x/mod v0.34.0 // indirect golang.org/x/net v0.50.0 // indirect golang.org/x/sync v0.20.0 // indirect golang.org/x/sys v0.41.0 // indirect diff --git a/packages/client-proxy/go.sum b/packages/client-proxy/go.sum index dd487c5288..dde1f4e9af 100644 --- a/packages/client-proxy/go.sum +++ b/packages/client-proxy/go.sum @@ -207,8 +207,7 @@ golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a h1:ovFr6Z0MNmU7nH8VaX5xqw+05ST2uO1exVfZPVqRC5o= golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a/go.mod h1:K79w1Vqn7PoiZn+TkNpx3BUWUQksGO3JcVX6qIjytmA= -golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= -golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= +golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= golang.org/x/net v0.50.0 h1:ucWh9eiCGyDR3vtzso0WMQinm2Dnt8cFMuQa9K33J60= golang.org/x/net v0.50.0/go.mod 
h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM= golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= diff --git a/packages/dashboard-api/go.mod b/packages/dashboard-api/go.mod index 79457e0125..cb4fa8809d 100644 --- a/packages/dashboard-api/go.mod +++ b/packages/dashboard-api/go.mod @@ -23,7 +23,11 @@ require ( github.com/jackc/pgx/v5 v5.9.1 github.com/oapi-codegen/gin-middleware v1.0.2 github.com/oapi-codegen/runtime v1.1.1 + github.com/riverqueue/river v0.32.0 + github.com/riverqueue/river/riverdriver/riverpgxv5 v0.32.0 github.com/stretchr/testify v1.11.1 + go.opentelemetry.io/otel v1.41.0 + go.opentelemetry.io/otel/metric v1.41.0 go.uber.org/zap v1.27.1 ) @@ -113,9 +117,7 @@ require ( github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/pressly/goose/v3 v3.26.0 // indirect github.com/redis/go-redis/v9 v9.17.3 // indirect - github.com/riverqueue/river v0.32.0 // indirect github.com/riverqueue/river/riverdriver v0.32.0 // indirect - github.com/riverqueue/river/riverdriver/riverpgxv5 v0.32.0 // indirect github.com/riverqueue/river/rivershared v0.32.0 // indirect github.com/riverqueue/river/rivertype v0.32.0 // indirect github.com/segmentio/asm v1.2.0 // indirect @@ -139,13 +141,11 @@ require ( go.opentelemetry.io/contrib/bridges/otelzap v0.14.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 // indirect go.opentelemetry.io/contrib/instrumentation/runtime v0.66.0 // indirect - go.opentelemetry.io/otel v1.41.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.15.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.39.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0 // indirect go.opentelemetry.io/otel/log v0.15.0 // indirect - go.opentelemetry.io/otel/metric v1.41.0 // indirect go.opentelemetry.io/otel/sdk v1.41.0 // 
indirect go.opentelemetry.io/otel/sdk/log v0.15.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.41.0 // indirect diff --git a/packages/dashboard-api/go.sum b/packages/dashboard-api/go.sum index d724d1c16c..7a0dcff727 100644 --- a/packages/dashboard-api/go.sum +++ b/packages/dashboard-api/go.sum @@ -139,8 +139,6 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs= -github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= github.com/jackc/pgx/v5 v5.9.1 h1:uwrxJXBnx76nyISkhr33kQLlUqjv7et7b9FjCen/tdc= github.com/jackc/pgx/v5 v5.9.1/go.mod h1:mal1tBGAFfLHvZzaYh77YS/eC6IX9OWbRV1QIIM0Jn4= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= @@ -257,6 +255,8 @@ github.com/riverqueue/river/rivershared v0.32.0 h1:7DwdrppMU9uoU2iU9aGQiv91nBezj github.com/riverqueue/river/rivershared v0.32.0/go.mod h1:UE7GEj3zaTV3cKw7Q3angCozlNEGsL50xZBKJQ9m6zU= github.com/riverqueue/river/rivertype v0.32.0 h1:RW7uodfl86gYkjwDponTAPNnUqM+X6BjlsNHxbt6Ztg= github.com/riverqueue/river/rivertype v0.32.0/go.mod h1:D1Ad+EaZiaXbQbJcJcfeicXJMBKno0n6UcfKI5Q7DIQ= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= @@ -376,8 +376,7 @@ 
golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a h1:ovFr6Z0MNmU7nH8VaX5xqw+05 golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a/go.mod h1:K79w1Vqn7PoiZn+TkNpx3BUWUQksGO3JcVX6qIjytmA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= -golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= +golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= golang.org/x/mod v0.34.0/go.mod h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= diff --git a/packages/dashboard-api/internal/backgroundworker/auth_user_sync.go b/packages/dashboard-api/internal/backgroundworker/auth_user_sync.go index f999926ae2..b962bec78b 100644 --- a/packages/dashboard-api/internal/backgroundworker/auth_user_sync.go +++ b/packages/dashboard-api/internal/backgroundworker/auth_user_sync.go @@ -36,7 +36,7 @@ type AuthUserSyncWorker struct { } func NewAuthUserSyncWorker(mainDB *sqlcdb.Client, l logger.Logger) *AuthUserSyncWorker { - jobsCounter, err := otel.Meter("dashboard-api.backgroundworker.auth_user_sync").Int64Counter( + jobsCounter, err := otel.Meter("github.com/e2b-dev/infra/packages/dashboard-api/internal/backgroundworker").Int64Counter( "dashboard_api.auth_user_sync.jobs_total", metric.WithDescription("Total auth user sync jobs by operation and result."), metric.WithUnit("{job}"), diff --git a/packages/db/go.mod b/packages/db/go.mod index 22cbf8515a..f69bcb99b5 100644 --- a/packages/db/go.mod +++ b/packages/db/go.mod @@ -14,7 +14,7 @@ require ( github.com/exaring/otelpgx v0.9.3 github.com/google/uuid v1.6.0 github.com/jackc/pgerrcode v0.0.0-20250907135507-afb5586c32a6 -
github.com/jackc/pgx/v5 v5.7.5 + github.com/jackc/pgx/v5 v5.9.1 github.com/lib/pq v1.11.2 github.com/pressly/goose/v3 v3.26.0 github.com/stretchr/testify v1.11.1 @@ -140,7 +140,7 @@ require ( go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.48.0 // indirect golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a // indirect - golang.org/x/mod v0.33.0 // indirect + golang.org/x/mod v0.34.0 // indirect golang.org/x/net v0.50.0 // indirect golang.org/x/sync v0.20.0 // indirect golang.org/x/sys v0.41.0 // indirect diff --git a/packages/db/go.sum b/packages/db/go.sum index b21cf6fa1b..3a807c79e1 100644 --- a/packages/db/go.sum +++ b/packages/db/go.sum @@ -175,8 +175,7 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs= -github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= +github.com/jackc/pgx/v5 v5.9.1 h1:uwrxJXBnx76nyISkhr33kQLlUqjv7et7b9FjCen/tdc= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -424,8 +423,7 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.33.0 
h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= -golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= +golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= diff --git a/packages/docker-reverse-proxy/go.mod b/packages/docker-reverse-proxy/go.mod index 2f663be82b..c39a14622f 100644 --- a/packages/docker-reverse-proxy/go.mod +++ b/packages/docker-reverse-proxy/go.mod @@ -40,7 +40,7 @@ require ( github.com/jackc/pgerrcode v0.0.0-20250907135507-afb5586c32a6 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect - github.com/jackc/pgx/v5 v5.7.5 // indirect + github.com/jackc/pgx/v5 v5.9.1 // indirect github.com/jackc/puddle/v2 v2.2.2 // indirect github.com/klauspost/compress v1.18.2 // indirect github.com/lib/pq v1.11.2 // indirect @@ -79,7 +79,7 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.1 // indirect golang.org/x/crypto v0.48.0 // indirect - golang.org/x/mod v0.33.0 // indirect + golang.org/x/mod v0.34.0 // indirect golang.org/x/net v0.50.0 // indirect golang.org/x/sync v0.20.0 // indirect golang.org/x/sys v0.41.0 // indirect diff --git a/packages/docker-reverse-proxy/go.sum b/packages/docker-reverse-proxy/go.sum index 9d0c348df3..4a13a06795 100644 --- a/packages/docker-reverse-proxy/go.sum +++ b/packages/docker-reverse-proxy/go.sum @@ -69,8 +69,7 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 
h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs= -github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= +github.com/jackc/pgx/v5 v5.9.1 h1:uwrxJXBnx76nyISkhr33kQLlUqjv7et7b9FjCen/tdc= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jellydator/ttlcache/v3 v3.4.0 h1:YS4P125qQS0tNhtL6aeYkheEaB/m8HCqdMMP4mnWdTY= @@ -189,8 +188,7 @@ golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a h1:ovFr6Z0MNmU7nH8VaX5xqw+05ST2uO1exVfZPVqRC5o= golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a/go.mod h1:K79w1Vqn7PoiZn+TkNpx3BUWUQksGO3JcVX6qIjytmA= -golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= -golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= +golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= golang.org/x/net v0.50.0 h1:ucWh9eiCGyDR3vtzso0WMQinm2Dnt8cFMuQa9K33J60= golang.org/x/net v0.50.0/go.mod h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM= golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= diff --git a/packages/envd/go.mod b/packages/envd/go.mod index 6175013054..4d86a8140b 100644 --- a/packages/envd/go.mod +++ b/packages/envd/go.mod @@ -73,7 +73,7 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.1 // indirect golang.org/x/crypto v0.48.0 // indirect - golang.org/x/mod v0.33.0 // indirect + golang.org/x/mod v0.34.0 // indirect golang.org/x/net v0.50.0 // indirect golang.org/x/sync v0.20.0 // indirect golang.org/x/text v0.35.0 // 
indirect diff --git a/packages/envd/go.sum b/packages/envd/go.sum index 0834f4de21..737d5e4e69 100644 --- a/packages/envd/go.sum +++ b/packages/envd/go.sum @@ -213,8 +213,7 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= -golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= +golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= diff --git a/packages/local-dev/go.mod b/packages/local-dev/go.mod index bd14d7b6ba..8429d7768d 100644 --- a/packages/local-dev/go.mod +++ b/packages/local-dev/go.mod @@ -10,7 +10,7 @@ require ( github.com/e2b-dev/infra/packages/db v0.0.0 github.com/e2b-dev/infra/packages/shared v0.0.0 github.com/google/uuid v1.6.0 - github.com/jackc/pgx/v5 v5.7.5 + github.com/jackc/pgx/v5 v5.9.1 github.com/pressly/goose/v3 v3.26.0 github.com/stretchr/testify v1.11.1 github.com/testcontainers/testcontainers-go/modules/postgres v0.39.0 diff --git a/packages/local-dev/go.sum b/packages/local-dev/go.sum index 5878ebb18b..4496ed8763 100644 --- a/packages/local-dev/go.sum +++ b/packages/local-dev/go.sum @@ -69,8 +69,7 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 
h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs= -github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= +github.com/jackc/pgx/v5 v5.9.1 h1:uwrxJXBnx76nyISkhr33kQLlUqjv7et7b9FjCen/tdc= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk= diff --git a/packages/orchestrator/go.mod b/packages/orchestrator/go.mod index cd896e8d7a..0d5dde3fe9 100644 --- a/packages/orchestrator/go.mod +++ b/packages/orchestrator/go.mod @@ -309,7 +309,7 @@ require ( golang.org/x/arch v0.18.0 // indirect golang.org/x/crypto v0.48.0 // indirect golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a // indirect - golang.org/x/mod v0.33.0 // indirect + golang.org/x/mod v0.34.0 // indirect golang.org/x/net v0.50.0 // indirect golang.org/x/oauth2 v0.34.0 // indirect golang.org/x/term v0.40.0 // indirect diff --git a/packages/orchestrator/go.sum b/packages/orchestrator/go.sum index f75731182b..4416ad76b9 100644 --- a/packages/orchestrator/go.sum +++ b/packages/orchestrator/go.sum @@ -1428,8 +1428,7 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= -golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= +golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= golang.org/x/net 
v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= diff --git a/packages/shared/go.mod b/packages/shared/go.mod index 093aa0ae83..e1d9394cc7 100644 --- a/packages/shared/go.mod +++ b/packages/shared/go.mod @@ -54,7 +54,7 @@ require ( go.opentelemetry.io/otel/sdk/metric v1.41.0 go.opentelemetry.io/otel/trace v1.41.0 go.uber.org/zap v1.27.1 - golang.org/x/mod v0.33.0 + golang.org/x/mod v0.34.0 golang.org/x/oauth2 v0.34.0 golang.org/x/sync v0.20.0 google.golang.org/api v0.257.0 diff --git a/packages/shared/go.sum b/packages/shared/go.sum index a58712d7d6..c4e534ef81 100644 --- a/packages/shared/go.sum +++ b/packages/shared/go.sum @@ -1023,8 +1023,7 @@ golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= -golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= +golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= diff --git a/tests/integration/go.mod b/tests/integration/go.mod index a0e9a6bb59..7a4019b2de 100644 --- a/tests/integration/go.mod +++ b/tests/integration/go.mod @@ -25,7 +25,7 @@ require ( 
github.com/e2b-dev/infra/packages/envd v0.0.0-00010101000000-000000000000 github.com/e2b-dev/infra/packages/shared v0.0.0 github.com/google/uuid v1.6.0 - github.com/jackc/pgx/v5 v5.7.5 + github.com/jackc/pgx/v5 v5.9.1 github.com/oapi-codegen/runtime v1.1.1 github.com/stretchr/testify v1.11.1 golang.org/x/sync v0.20.0 @@ -176,7 +176,7 @@ require ( go.uber.org/zap v1.27.1 // indirect golang.org/x/arch v0.18.0 // indirect golang.org/x/crypto v0.48.0 // indirect - golang.org/x/mod v0.33.0 // indirect + golang.org/x/mod v0.34.0 // indirect golang.org/x/net v0.50.0 // indirect golang.org/x/sys v0.41.0 // indirect golang.org/x/term v0.40.0 // indirect diff --git a/tests/integration/go.sum b/tests/integration/go.sum index 83d28a5089..a26fb9f43a 100644 --- a/tests/integration/go.sum +++ b/tests/integration/go.sum @@ -182,8 +182,7 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs= -github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= +github.com/jackc/pgx/v5 v5.9.1 h1:uwrxJXBnx76nyISkhr33kQLlUqjv7et7b9FjCen/tdc= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jellydator/ttlcache/v3 v3.4.0 h1:YS4P125qQS0tNhtL6aeYkheEaB/m8HCqdMMP4mnWdTY= @@ -447,8 +446,7 @@ golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a/go.mod h1:K79w1Vqn7PoiZn+TkN golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod 
h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= -golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= +golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= From fb0afd468e591857742206d17ea579de252462c6 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Wed, 1 Apr 2026 20:44:00 +0000 Subject: [PATCH 17/20] chore: auto-commit generated changes --- packages/api/go.sum | 2 ++ packages/auth/go.sum | 1 + packages/clickhouse/go.sum | 2 ++ packages/client-proxy/go.sum | 1 + packages/db/go.sum | 2 ++ packages/docker-reverse-proxy/go.sum | 2 ++ packages/envd/go.sum | 1 + packages/local-dev/go.sum | 1 + packages/orchestrator/go.sum | 1 + packages/shared/go.sum | 1 + tests/integration/go.sum | 2 ++ 11 files changed, 16 insertions(+) diff --git a/packages/api/go.sum b/packages/api/go.sum index 5e33ec6518..f1c7dc11cf 100644 --- a/packages/api/go.sum +++ b/packages/api/go.sum @@ -558,6 +558,7 @@ github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5ey github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= github.com/jackc/pgx/v5 v5.9.1 h1:uwrxJXBnx76nyISkhr33kQLlUqjv7et7b9FjCen/tdc= +github.com/jackc/pgx/v5 v5.9.1/go.mod h1:mal1tBGAFfLHvZzaYh77YS/eC6IX9OWbRV1QIIM0Jn4= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= 
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jaegertracing/jaeger-idl v0.5.0 h1:zFXR5NL3Utu7MhPg8ZorxtCBjHrL3ReM1VoB65FOFGE= @@ -1159,6 +1160,7 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= +golang.org/x/mod v0.34.0/go.mod h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= diff --git a/packages/auth/go.sum b/packages/auth/go.sum index 6d03e5cbdb..d0e8192d66 100644 --- a/packages/auth/go.sum +++ b/packages/auth/go.sum @@ -115,6 +115,7 @@ github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5ey github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= github.com/jackc/pgx/v5 v5.9.1 h1:uwrxJXBnx76nyISkhr33kQLlUqjv7et7b9FjCen/tdc= +github.com/jackc/pgx/v5 v5.9.1/go.mod h1:mal1tBGAFfLHvZzaYh77YS/eC6IX9OWbRV1QIIM0Jn4= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jellydator/ttlcache/v3 v3.4.0 h1:YS4P125qQS0tNhtL6aeYkheEaB/m8HCqdMMP4mnWdTY= diff --git a/packages/clickhouse/go.sum b/packages/clickhouse/go.sum index 88ae0b3a51..c290bd8497 100644 --- a/packages/clickhouse/go.sum +++ b/packages/clickhouse/go.sum @@ 
-129,6 +129,7 @@ github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5ey github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= github.com/jackc/pgx/v5 v5.9.1 h1:uwrxJXBnx76nyISkhr33kQLlUqjv7et7b9FjCen/tdc= +github.com/jackc/pgx/v5 v5.9.1/go.mod h1:mal1tBGAFfLHvZzaYh77YS/eC6IX9OWbRV1QIIM0Jn4= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -310,6 +311,7 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= +golang.org/x/mod v0.34.0/go.mod h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= diff --git a/packages/client-proxy/go.sum b/packages/client-proxy/go.sum index dde1f4e9af..a8aa57bff1 100644 --- a/packages/client-proxy/go.sum +++ b/packages/client-proxy/go.sum @@ -208,6 +208,7 @@ golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVo golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a h1:ovFr6Z0MNmU7nH8VaX5xqw+05ST2uO1exVfZPVqRC5o= golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a/go.mod h1:K79w1Vqn7PoiZn+TkNpx3BUWUQksGO3JcVX6qIjytmA= golang.org/x/mod 
v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= +golang.org/x/mod v0.34.0/go.mod h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY= golang.org/x/net v0.50.0 h1:ucWh9eiCGyDR3vtzso0WMQinm2Dnt8cFMuQa9K33J60= golang.org/x/net v0.50.0/go.mod h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM= golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= diff --git a/packages/db/go.sum b/packages/db/go.sum index 3a807c79e1..b3908e0377 100644 --- a/packages/db/go.sum +++ b/packages/db/go.sum @@ -176,6 +176,7 @@ github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5ey github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= github.com/jackc/pgx/v5 v5.9.1 h1:uwrxJXBnx76nyISkhr33kQLlUqjv7et7b9FjCen/tdc= +github.com/jackc/pgx/v5 v5.9.1/go.mod h1:mal1tBGAFfLHvZzaYh77YS/eC6IX9OWbRV1QIIM0Jn4= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -424,6 +425,7 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= +golang.org/x/mod v0.34.0/go.mod h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= diff --git a/packages/docker-reverse-proxy/go.sum b/packages/docker-reverse-proxy/go.sum index 4a13a06795..fa1e0b24f5 100644 --- a/packages/docker-reverse-proxy/go.sum +++ b/packages/docker-reverse-proxy/go.sum @@ -70,6 +70,7 @@ github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5ey github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= github.com/jackc/pgx/v5 v5.9.1 h1:uwrxJXBnx76nyISkhr33kQLlUqjv7et7b9FjCen/tdc= +github.com/jackc/pgx/v5 v5.9.1/go.mod h1:mal1tBGAFfLHvZzaYh77YS/eC6IX9OWbRV1QIIM0Jn4= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jellydator/ttlcache/v3 v3.4.0 h1:YS4P125qQS0tNhtL6aeYkheEaB/m8HCqdMMP4mnWdTY= @@ -189,6 +190,7 @@ golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVo golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a h1:ovFr6Z0MNmU7nH8VaX5xqw+05ST2uO1exVfZPVqRC5o= golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a/go.mod h1:K79w1Vqn7PoiZn+TkNpx3BUWUQksGO3JcVX6qIjytmA= golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= +golang.org/x/mod v0.34.0/go.mod h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY= golang.org/x/net v0.50.0 h1:ucWh9eiCGyDR3vtzso0WMQinm2Dnt8cFMuQa9K33J60= golang.org/x/net v0.50.0/go.mod h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM= golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= diff --git a/packages/envd/go.sum b/packages/envd/go.sum index 737d5e4e69..fa0212436b 100644 --- a/packages/envd/go.sum +++ b/packages/envd/go.sum @@ -214,6 +214,7 @@ golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= golang.org/x/crypto v0.48.0/go.mod 
h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= +golang.org/x/mod v0.34.0/go.mod h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= diff --git a/packages/local-dev/go.sum b/packages/local-dev/go.sum index 4496ed8763..def89b00d0 100644 --- a/packages/local-dev/go.sum +++ b/packages/local-dev/go.sum @@ -70,6 +70,7 @@ github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5ey github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= github.com/jackc/pgx/v5 v5.9.1 h1:uwrxJXBnx76nyISkhr33kQLlUqjv7et7b9FjCen/tdc= +github.com/jackc/pgx/v5 v5.9.1/go.mod h1:mal1tBGAFfLHvZzaYh77YS/eC6IX9OWbRV1QIIM0Jn4= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk= diff --git a/packages/orchestrator/go.sum b/packages/orchestrator/go.sum index 4416ad76b9..17645baedb 100644 --- a/packages/orchestrator/go.sum +++ b/packages/orchestrator/go.sum @@ -1429,6 +1429,7 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.34.0 
h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= +golang.org/x/mod v0.34.0/go.mod h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= diff --git a/packages/shared/go.sum b/packages/shared/go.sum index c4e534ef81..69f3c15d7f 100644 --- a/packages/shared/go.sum +++ b/packages/shared/go.sum @@ -1024,6 +1024,7 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= +golang.org/x/mod v0.34.0/go.mod h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= diff --git a/tests/integration/go.sum b/tests/integration/go.sum index a26fb9f43a..4ef6ca9f30 100644 --- a/tests/integration/go.sum +++ b/tests/integration/go.sum @@ -183,6 +183,7 @@ github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5ey github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= github.com/jackc/pgx/v5 v5.9.1 h1:uwrxJXBnx76nyISkhr33kQLlUqjv7et7b9FjCen/tdc= +github.com/jackc/pgx/v5 v5.9.1/go.mod 
h1:mal1tBGAFfLHvZzaYh77YS/eC6IX9OWbRV1QIIM0Jn4= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jellydator/ttlcache/v3 v3.4.0 h1:YS4P125qQS0tNhtL6aeYkheEaB/m8HCqdMMP4mnWdTY= @@ -447,6 +448,7 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= +golang.org/x/mod v0.34.0/go.mod h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= From bd4dd80223d12df4cdd969317ce6efc640e22a9c Mon Sep 17 00:00:00 2001 From: ben-fornefeld Date: Wed, 1 Apr 2026 15:17:50 -0700 Subject: [PATCH 18/20] chore: update pgx dependency to v5.9.1 and golang.org/x/mod to v0.34.0 across multiple packages --- packages/api/go.mod | 4 +- packages/api/go.sum | 6 +- packages/auth/go.mod | 2 +- packages/auth/go.sum | 3 +- packages/clickhouse/go.mod | 4 +- packages/clickhouse/go.sum | 6 +- packages/client-proxy/go.mod | 2 +- packages/client-proxy/go.sum | 3 +- packages/dashboard-api/go.mod | 8 +- packages/dashboard-api/go.sum | 7 +- packages/db/go.mod | 4 +- packages/db/go.sum | 6 +- ...0260401000001_river_auth_custom_schema.sql | 13 +++ ...01000003_river_auth_user_sync_triggers.sql | 107 +++++------------- packages/docker-reverse-proxy/go.mod | 4 +- packages/docker-reverse-proxy/go.sum | 6 +- packages/envd/go.mod | 2 +- packages/envd/go.sum | 3 +- packages/local-dev/go.mod | 2 +- packages/local-dev/go.sum | 3 
+- packages/orchestrator/go.mod | 2 +- packages/orchestrator/go.sum | 3 +- packages/shared/go.mod | 2 +- packages/shared/go.sum | 3 +- tests/integration/go.mod | 4 +- tests/integration/go.sum | 6 +- 26 files changed, 79 insertions(+), 136 deletions(-) create mode 100644 packages/db/pkg/auth/migrations/20260401000001_river_auth_custom_schema.sql diff --git a/packages/api/go.mod b/packages/api/go.mod index 9d197cf26d..8a9763fe98 100644 --- a/packages/api/go.mod +++ b/packages/api/go.mod @@ -35,7 +35,7 @@ require ( github.com/google/uuid v1.6.0 github.com/grafana/loki/v3 v3.6.4 github.com/hashicorp/nomad/api v0.0.0-20251216171439-1dee0671280e - github.com/jackc/pgx/v5 v5.7.5 + github.com/jackc/pgx/v5 v5.9.1 github.com/launchdarkly/go-sdk-common/v3 v3.3.0 github.com/launchdarkly/go-server-sdk/v7 v7.13.0 github.com/oapi-codegen/gin-middleware v1.0.2 @@ -381,7 +381,7 @@ require ( golang.org/x/crypto v0.48.0 // indirect golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a // indirect golang.org/x/image v0.38.0 // indirect - golang.org/x/mod v0.33.0 // indirect + golang.org/x/mod v0.34.0 // indirect golang.org/x/oauth2 v0.34.0 // indirect golang.org/x/sys v0.41.0 // indirect golang.org/x/text v0.35.0 // indirect diff --git a/packages/api/go.sum b/packages/api/go.sum index 1d207bd808..5e33ec6518 100644 --- a/packages/api/go.sum +++ b/packages/api/go.sum @@ -557,8 +557,7 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs= -github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= +github.com/jackc/pgx/v5 v5.9.1 
h1:uwrxJXBnx76nyISkhr33kQLlUqjv7et7b9FjCen/tdc= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jaegertracing/jaeger-idl v0.5.0 h1:zFXR5NL3Utu7MhPg8ZorxtCBjHrL3ReM1VoB65FOFGE= @@ -1159,8 +1158,7 @@ golang.org/x/image v0.38.0/go.mod h1:/3f6vaXC+6CEanU4KJxbcUZyEePbyKbaLoDOe4ehFYY golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= -golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= +golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= diff --git a/packages/auth/go.mod b/packages/auth/go.mod index 27782f8e62..a545b30053 100644 --- a/packages/auth/go.mod +++ b/packages/auth/go.mod @@ -61,7 +61,7 @@ require ( github.com/jackc/pgerrcode v0.0.0-20250907135507-afb5586c32a6 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect - github.com/jackc/pgx/v5 v5.7.5 // indirect + github.com/jackc/pgx/v5 v5.9.1 // indirect github.com/jackc/puddle/v2 v2.2.2 // indirect github.com/jellydator/ttlcache/v3 v3.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect diff --git a/packages/auth/go.sum b/packages/auth/go.sum index 4b463c263c..6d03e5cbdb 100644 --- a/packages/auth/go.sum +++ b/packages/auth/go.sum @@ -114,8 +114,7 @@ 
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs= -github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= +github.com/jackc/pgx/v5 v5.9.1 h1:uwrxJXBnx76nyISkhr33kQLlUqjv7et7b9FjCen/tdc= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jellydator/ttlcache/v3 v3.4.0 h1:YS4P125qQS0tNhtL6aeYkheEaB/m8HCqdMMP4mnWdTY= diff --git a/packages/clickhouse/go.mod b/packages/clickhouse/go.mod index e7f4c03e67..2b151949d7 100644 --- a/packages/clickhouse/go.mod +++ b/packages/clickhouse/go.mod @@ -39,7 +39,7 @@ require ( github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect - github.com/jackc/pgx/v5 v5.7.5 // indirect + github.com/jackc/pgx/v5 v5.9.1 // indirect github.com/jackc/puddle/v2 v2.2.2 // indirect github.com/joho/godotenv v1.5.1 // indirect github.com/jonboulle/clockwork v0.5.0 // indirect @@ -93,7 +93,7 @@ require ( go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.48.0 // indirect golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a // indirect - golang.org/x/mod v0.33.0 // indirect + golang.org/x/mod v0.34.0 // indirect golang.org/x/net v0.50.0 // indirect golang.org/x/sync v0.20.0 // indirect golang.org/x/sys v0.41.0 // indirect diff --git a/packages/clickhouse/go.sum b/packages/clickhouse/go.sum index af8e90ffb7..88ae0b3a51 100644 
--- a/packages/clickhouse/go.sum +++ b/packages/clickhouse/go.sum @@ -128,8 +128,7 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs= -github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= +github.com/jackc/pgx/v5 v5.9.1 h1:uwrxJXBnx76nyISkhr33kQLlUqjv7et7b9FjCen/tdc= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -310,8 +309,7 @@ golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvx golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= -golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= +golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= diff --git a/packages/client-proxy/go.mod b/packages/client-proxy/go.mod index 
04bad15cce..0b041d6463 100644 --- a/packages/client-proxy/go.mod +++ b/packages/client-proxy/go.mod @@ -63,7 +63,7 @@ require ( go.opentelemetry.io/proto/otlp v1.9.0 // indirect go.uber.org/multierr v1.11.0 // indirect golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a // indirect - golang.org/x/mod v0.33.0 // indirect + golang.org/x/mod v0.34.0 // indirect golang.org/x/net v0.50.0 // indirect golang.org/x/sync v0.20.0 // indirect golang.org/x/sys v0.41.0 // indirect diff --git a/packages/client-proxy/go.sum b/packages/client-proxy/go.sum index dd487c5288..dde1f4e9af 100644 --- a/packages/client-proxy/go.sum +++ b/packages/client-proxy/go.sum @@ -207,8 +207,7 @@ golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a h1:ovFr6Z0MNmU7nH8VaX5xqw+05ST2uO1exVfZPVqRC5o= golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a/go.mod h1:K79w1Vqn7PoiZn+TkNpx3BUWUQksGO3JcVX6qIjytmA= -golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= -golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= +golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= golang.org/x/net v0.50.0 h1:ucWh9eiCGyDR3vtzso0WMQinm2Dnt8cFMuQa9K33J60= golang.org/x/net v0.50.0/go.mod h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM= golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= diff --git a/packages/dashboard-api/go.mod b/packages/dashboard-api/go.mod index 79457e0125..cb4fa8809d 100644 --- a/packages/dashboard-api/go.mod +++ b/packages/dashboard-api/go.mod @@ -23,7 +23,11 @@ require ( github.com/jackc/pgx/v5 v5.9.1 github.com/oapi-codegen/gin-middleware v1.0.2 github.com/oapi-codegen/runtime v1.1.1 + github.com/riverqueue/river v0.32.0 + github.com/riverqueue/river/riverdriver/riverpgxv5 v0.32.0 github.com/stretchr/testify v1.11.1 + go.opentelemetry.io/otel v1.41.0 + 
go.opentelemetry.io/otel/metric v1.41.0 go.uber.org/zap v1.27.1 ) @@ -113,9 +117,7 @@ require ( github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/pressly/goose/v3 v3.26.0 // indirect github.com/redis/go-redis/v9 v9.17.3 // indirect - github.com/riverqueue/river v0.32.0 // indirect github.com/riverqueue/river/riverdriver v0.32.0 // indirect - github.com/riverqueue/river/riverdriver/riverpgxv5 v0.32.0 // indirect github.com/riverqueue/river/rivershared v0.32.0 // indirect github.com/riverqueue/river/rivertype v0.32.0 // indirect github.com/segmentio/asm v1.2.0 // indirect @@ -139,13 +141,11 @@ require ( go.opentelemetry.io/contrib/bridges/otelzap v0.14.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 // indirect go.opentelemetry.io/contrib/instrumentation/runtime v0.66.0 // indirect - go.opentelemetry.io/otel v1.41.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.15.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.39.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0 // indirect go.opentelemetry.io/otel/log v0.15.0 // indirect - go.opentelemetry.io/otel/metric v1.41.0 // indirect go.opentelemetry.io/otel/sdk v1.41.0 // indirect go.opentelemetry.io/otel/sdk/log v0.15.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.41.0 // indirect diff --git a/packages/dashboard-api/go.sum b/packages/dashboard-api/go.sum index d724d1c16c..7a0dcff727 100644 --- a/packages/dashboard-api/go.sum +++ b/packages/dashboard-api/go.sum @@ -139,8 +139,6 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= 
github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs= -github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= github.com/jackc/pgx/v5 v5.9.1 h1:uwrxJXBnx76nyISkhr33kQLlUqjv7et7b9FjCen/tdc= github.com/jackc/pgx/v5 v5.9.1/go.mod h1:mal1tBGAFfLHvZzaYh77YS/eC6IX9OWbRV1QIIM0Jn4= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= @@ -257,6 +255,8 @@ github.com/riverqueue/river/rivershared v0.32.0 h1:7DwdrppMU9uoU2iU9aGQiv91nBezj github.com/riverqueue/river/rivershared v0.32.0/go.mod h1:UE7GEj3zaTV3cKw7Q3angCozlNEGsL50xZBKJQ9m6zU= github.com/riverqueue/river/rivertype v0.32.0 h1:RW7uodfl86gYkjwDponTAPNnUqM+X6BjlsNHxbt6Ztg= github.com/riverqueue/river/rivertype v0.32.0/go.mod h1:D1Ad+EaZiaXbQbJcJcfeicXJMBKno0n6UcfKI5Q7DIQ= +github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= +github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/segmentio/asm v1.2.0 h1:9BQrFxC+YOHJlTlHGkTrFWf59nbL3XnCoFLTwDCI7ys= @@ -376,8 +376,7 @@ golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a h1:ovFr6Z0MNmU7nH8VaX5xqw+05 golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a/go.mod h1:K79w1Vqn7PoiZn+TkNpx3BUWUQksGO3JcVX6qIjytmA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= -golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= +golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= golang.org/x/mod v0.34.0/go.mod 
h1:ykgH52iCZe79kzLLMhyCUzhMci+nQj+0XkbXpNYtVjY= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= diff --git a/packages/db/go.mod b/packages/db/go.mod index 22cbf8515a..f69bcb99b5 100644 --- a/packages/db/go.mod +++ b/packages/db/go.mod @@ -14,7 +14,7 @@ require ( github.com/exaring/otelpgx v0.9.3 github.com/google/uuid v1.6.0 github.com/jackc/pgerrcode v0.0.0-20250907135507-afb5586c32a6 - github.com/jackc/pgx/v5 v5.7.5 + github.com/jackc/pgx/v5 v5.9.1 github.com/lib/pq v1.11.2 github.com/pressly/goose/v3 v3.26.0 github.com/stretchr/testify v1.11.1 @@ -140,7 +140,7 @@ require ( go.uber.org/multierr v1.11.0 // indirect golang.org/x/crypto v0.48.0 // indirect golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a // indirect - golang.org/x/mod v0.33.0 // indirect + golang.org/x/mod v0.34.0 // indirect golang.org/x/net v0.50.0 // indirect golang.org/x/sync v0.20.0 // indirect golang.org/x/sys v0.41.0 // indirect diff --git a/packages/db/go.sum b/packages/db/go.sum index b21cf6fa1b..3a807c79e1 100644 --- a/packages/db/go.sum +++ b/packages/db/go.sum @@ -175,8 +175,7 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs= -github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= +github.com/jackc/pgx/v5 v5.9.1 h1:uwrxJXBnx76nyISkhr33kQLlUqjv7et7b9FjCen/tdc= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= 
github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= @@ -424,8 +423,7 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= -golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= +golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= diff --git a/packages/db/pkg/auth/migrations/20260401000001_river_auth_custom_schema.sql b/packages/db/pkg/auth/migrations/20260401000001_river_auth_custom_schema.sql new file mode 100644 index 0000000000..73afedfbe3 --- /dev/null +++ b/packages/db/pkg/auth/migrations/20260401000001_river_auth_custom_schema.sql @@ -0,0 +1,13 @@ +-- +goose Up +-- +goose StatementBegin + +CREATE SCHEMA IF NOT EXISTS auth_custom; + +-- +goose StatementEnd + +-- +goose Down +-- +goose StatementBegin + +/* We don't want to drop the schema, as it is used by other services. 
*/ + +-- +goose StatementEnd diff --git a/packages/db/pkg/auth/migrations/20260401000003_river_auth_user_sync_triggers.sql b/packages/db/pkg/auth/migrations/20260401000003_river_auth_user_sync_triggers.sql index 3d41c6d733..ba3ee8737c 100644 --- a/packages/db/pkg/auth/migrations/20260401000003_river_auth_user_sync_triggers.sql +++ b/packages/db/pkg/auth/migrations/20260401000003_river_auth_user_sync_triggers.sql @@ -1,11 +1,11 @@ -- +goose Up -- +goose StatementBegin -CREATE SCHEMA IF NOT EXISTS auth_custom; - -CREATE OR REPLACE FUNCTION public.sync_insert_auth_users_to_public_users_trigger() RETURNS TRIGGER +CREATE OR REPLACE FUNCTION auth_custom.enqueue_user_sync_on_insert() +RETURNS TRIGGER LANGUAGE plpgsql -AS $func$ +SECURITY DEFINER SET search_path = '' +AS $$ BEGIN INSERT INTO auth_custom.river_job (args, kind, max_attempts, queue, state) VALUES ( @@ -20,11 +20,13 @@ BEGIN RETURN NEW; END; -$func$ SECURITY DEFINER SET search_path = public, auth_custom; +$$; -CREATE OR REPLACE FUNCTION public.sync_update_auth_users_to_public_users_trigger() RETURNS TRIGGER +CREATE OR REPLACE FUNCTION auth_custom.enqueue_user_sync_on_update() +RETURNS TRIGGER LANGUAGE plpgsql -AS $func$ +SECURITY DEFINER SET search_path = '' +AS $$ BEGIN IF OLD.email IS DISTINCT FROM NEW.email THEN INSERT INTO auth_custom.river_job (args, kind, max_attempts, queue, state) @@ -41,11 +43,13 @@ BEGIN RETURN NEW; END; -$func$ SECURITY DEFINER SET search_path = public, auth_custom; +$$; -CREATE OR REPLACE FUNCTION public.sync_delete_auth_users_to_public_users_trigger() RETURNS TRIGGER +CREATE OR REPLACE FUNCTION auth_custom.enqueue_user_sync_on_delete() +RETURNS TRIGGER LANGUAGE plpgsql -AS $func$ +SECURITY DEFINER SET search_path = '' +AS $$ BEGIN INSERT INTO auth_custom.river_job (args, kind, max_attempts, queue, state) VALUES ( @@ -60,28 +64,20 @@ BEGIN RETURN OLD; END; -$func$ SECURITY DEFINER SET search_path = public, auth_custom; - -ALTER FUNCTION 
public.sync_insert_auth_users_to_public_users_trigger() OWNER TO trigger_user; -ALTER FUNCTION public.sync_update_auth_users_to_public_users_trigger() OWNER TO trigger_user; -ALTER FUNCTION public.sync_delete_auth_users_to_public_users_trigger() OWNER TO trigger_user; +$$; -DROP TRIGGER IF EXISTS sync_inserts_to_public_users ON auth.users; -CREATE TRIGGER sync_inserts_to_public_users +CREATE TRIGGER enqueue_user_sync_on_insert AFTER INSERT ON auth.users - FOR EACH ROW EXECUTE FUNCTION public.sync_insert_auth_users_to_public_users_trigger(); + FOR EACH ROW EXECUTE FUNCTION auth_custom.enqueue_user_sync_on_insert(); -DROP TRIGGER IF EXISTS sync_updates_to_public_users ON auth.users; -CREATE TRIGGER sync_updates_to_public_users +CREATE TRIGGER enqueue_user_sync_on_update AFTER UPDATE ON auth.users - FOR EACH ROW EXECUTE FUNCTION public.sync_update_auth_users_to_public_users_trigger(); + FOR EACH ROW EXECUTE FUNCTION auth_custom.enqueue_user_sync_on_update(); -DROP TRIGGER IF EXISTS sync_deletes_to_public_users ON auth.users; -CREATE TRIGGER sync_deletes_to_public_users +CREATE TRIGGER enqueue_user_sync_on_delete AFTER DELETE ON auth.users - FOR EACH ROW EXECUTE FUNCTION public.sync_delete_auth_users_to_public_users_trigger(); + FOR EACH ROW EXECUTE FUNCTION auth_custom.enqueue_user_sync_on_delete(); -GRANT USAGE ON SCHEMA auth_custom TO trigger_user; GRANT INSERT ON auth_custom.river_job TO trigger_user; GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA auth_custom TO trigger_user; @@ -90,63 +86,14 @@ GRANT USAGE, SELECT ON ALL SEQUENCES IN SCHEMA auth_custom TO trigger_user; -- +goose Down -- +goose StatementBegin -DROP TRIGGER IF EXISTS sync_inserts_to_public_users ON auth.users; -DROP TRIGGER IF EXISTS sync_updates_to_public_users ON auth.users; -DROP TRIGGER IF EXISTS sync_deletes_to_public_users ON auth.users; - -CREATE OR REPLACE FUNCTION public.sync_insert_auth_users_to_public_users_trigger() RETURNS TRIGGER -LANGUAGE plpgsql -AS $func$ -BEGIN - INSERT INTO 
public.user_sync_queue (user_id, operation) - VALUES (NEW.id, 'upsert'); - - RETURN NEW; -END; -$func$ SECURITY DEFINER SET search_path = public; +DROP TRIGGER IF EXISTS enqueue_user_sync_on_insert ON auth.users; +DROP TRIGGER IF EXISTS enqueue_user_sync_on_update ON auth.users; +DROP TRIGGER IF EXISTS enqueue_user_sync_on_delete ON auth.users; -CREATE OR REPLACE FUNCTION public.sync_update_auth_users_to_public_users_trigger() RETURNS TRIGGER -LANGUAGE plpgsql -AS $func$ -BEGIN - IF OLD.email IS DISTINCT FROM NEW.email THEN - INSERT INTO public.user_sync_queue (user_id, operation) - VALUES (NEW.id, 'upsert'); - END IF; - - RETURN NEW; -END; -$func$ SECURITY DEFINER SET search_path = public; - -CREATE OR REPLACE FUNCTION public.sync_delete_auth_users_to_public_users_trigger() RETURNS TRIGGER -LANGUAGE plpgsql -AS $func$ -BEGIN - INSERT INTO public.user_sync_queue (user_id, operation) - VALUES (OLD.id, 'delete'); - - RETURN OLD; -END; -$func$ SECURITY DEFINER SET search_path = public; - -ALTER FUNCTION public.sync_insert_auth_users_to_public_users_trigger() OWNER TO trigger_user; -ALTER FUNCTION public.sync_update_auth_users_to_public_users_trigger() OWNER TO trigger_user; -ALTER FUNCTION public.sync_delete_auth_users_to_public_users_trigger() OWNER TO trigger_user; - -CREATE TRIGGER sync_inserts_to_public_users - AFTER INSERT ON auth.users - FOR EACH ROW EXECUTE FUNCTION public.sync_insert_auth_users_to_public_users_trigger(); - -CREATE TRIGGER sync_updates_to_public_users - AFTER UPDATE ON auth.users - FOR EACH ROW EXECUTE FUNCTION public.sync_update_auth_users_to_public_users_trigger(); - -CREATE TRIGGER sync_deletes_to_public_users - AFTER DELETE ON auth.users - FOR EACH ROW EXECUTE FUNCTION public.sync_delete_auth_users_to_public_users_trigger(); +DROP FUNCTION IF EXISTS auth_custom.enqueue_user_sync_on_insert(); +DROP FUNCTION IF EXISTS auth_custom.enqueue_user_sync_on_update(); +DROP FUNCTION IF EXISTS auth_custom.enqueue_user_sync_on_delete(); REVOKE ALL ON 
SCHEMA auth_custom FROM trigger_user; -DROP SCHEMA IF EXISTS auth_custom CASCADE; - -- +goose StatementEnd diff --git a/packages/docker-reverse-proxy/go.mod b/packages/docker-reverse-proxy/go.mod index 2f663be82b..c39a14622f 100644 --- a/packages/docker-reverse-proxy/go.mod +++ b/packages/docker-reverse-proxy/go.mod @@ -40,7 +40,7 @@ require ( github.com/jackc/pgerrcode v0.0.0-20250907135507-afb5586c32a6 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect - github.com/jackc/pgx/v5 v5.7.5 // indirect + github.com/jackc/pgx/v5 v5.9.1 // indirect github.com/jackc/puddle/v2 v2.2.2 // indirect github.com/klauspost/compress v1.18.2 // indirect github.com/lib/pq v1.11.2 // indirect @@ -79,7 +79,7 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.1 // indirect golang.org/x/crypto v0.48.0 // indirect - golang.org/x/mod v0.33.0 // indirect + golang.org/x/mod v0.34.0 // indirect golang.org/x/net v0.50.0 // indirect golang.org/x/sync v0.20.0 // indirect golang.org/x/sys v0.41.0 // indirect diff --git a/packages/docker-reverse-proxy/go.sum b/packages/docker-reverse-proxy/go.sum index 9d0c348df3..4a13a06795 100644 --- a/packages/docker-reverse-proxy/go.sum +++ b/packages/docker-reverse-proxy/go.sum @@ -69,8 +69,7 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs= -github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= +github.com/jackc/pgx/v5 v5.9.1 h1:uwrxJXBnx76nyISkhr33kQLlUqjv7et7b9FjCen/tdc= 
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jellydator/ttlcache/v3 v3.4.0 h1:YS4P125qQS0tNhtL6aeYkheEaB/m8HCqdMMP4mnWdTY= @@ -189,8 +188,7 @@ golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a h1:ovFr6Z0MNmU7nH8VaX5xqw+05ST2uO1exVfZPVqRC5o= golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a/go.mod h1:K79w1Vqn7PoiZn+TkNpx3BUWUQksGO3JcVX6qIjytmA= -golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= -golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= +golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= golang.org/x/net v0.50.0 h1:ucWh9eiCGyDR3vtzso0WMQinm2Dnt8cFMuQa9K33J60= golang.org/x/net v0.50.0/go.mod h1:UgoSli3F/pBgdJBHCTc+tp3gmrU4XswgGRgtnwWTfyM= golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= diff --git a/packages/envd/go.mod b/packages/envd/go.mod index 6175013054..4d86a8140b 100644 --- a/packages/envd/go.mod +++ b/packages/envd/go.mod @@ -73,7 +73,7 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.1 // indirect golang.org/x/crypto v0.48.0 // indirect - golang.org/x/mod v0.33.0 // indirect + golang.org/x/mod v0.34.0 // indirect golang.org/x/net v0.50.0 // indirect golang.org/x/sync v0.20.0 // indirect golang.org/x/text v0.35.0 // indirect diff --git a/packages/envd/go.sum b/packages/envd/go.sum index 0834f4de21..737d5e4e69 100644 --- a/packages/envd/go.sum +++ b/packages/envd/go.sum @@ -213,8 +213,7 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts= golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos= 
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= -golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= +golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= diff --git a/packages/local-dev/go.mod b/packages/local-dev/go.mod index bd14d7b6ba..8429d7768d 100644 --- a/packages/local-dev/go.mod +++ b/packages/local-dev/go.mod @@ -10,7 +10,7 @@ require ( github.com/e2b-dev/infra/packages/db v0.0.0 github.com/e2b-dev/infra/packages/shared v0.0.0 github.com/google/uuid v1.6.0 - github.com/jackc/pgx/v5 v5.7.5 + github.com/jackc/pgx/v5 v5.9.1 github.com/pressly/goose/v3 v3.26.0 github.com/stretchr/testify v1.11.1 github.com/testcontainers/testcontainers-go/modules/postgres v0.39.0 diff --git a/packages/local-dev/go.sum b/packages/local-dev/go.sum index 5878ebb18b..4496ed8763 100644 --- a/packages/local-dev/go.sum +++ b/packages/local-dev/go.sum @@ -69,8 +69,7 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs= -github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= +github.com/jackc/pgx/v5 v5.9.1 h1:uwrxJXBnx76nyISkhr33kQLlUqjv7et7b9FjCen/tdc= 
github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/klauspost/compress v1.18.2 h1:iiPHWW0YrcFgpBYhsA6D1+fqHssJscY/Tm/y2Uqnapk= diff --git a/packages/orchestrator/go.mod b/packages/orchestrator/go.mod index cd896e8d7a..0d5dde3fe9 100644 --- a/packages/orchestrator/go.mod +++ b/packages/orchestrator/go.mod @@ -309,7 +309,7 @@ require ( golang.org/x/arch v0.18.0 // indirect golang.org/x/crypto v0.48.0 // indirect golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a // indirect - golang.org/x/mod v0.33.0 // indirect + golang.org/x/mod v0.34.0 // indirect golang.org/x/net v0.50.0 // indirect golang.org/x/oauth2 v0.34.0 // indirect golang.org/x/term v0.40.0 // indirect diff --git a/packages/orchestrator/go.sum b/packages/orchestrator/go.sum index f75731182b..4416ad76b9 100644 --- a/packages/orchestrator/go.sum +++ b/packages/orchestrator/go.sum @@ -1428,8 +1428,7 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= -golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= +golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= diff --git a/packages/shared/go.mod b/packages/shared/go.mod index 093aa0ae83..e1d9394cc7 100644 --- a/packages/shared/go.mod +++ 
b/packages/shared/go.mod @@ -54,7 +54,7 @@ require ( go.opentelemetry.io/otel/sdk/metric v1.41.0 go.opentelemetry.io/otel/trace v1.41.0 go.uber.org/zap v1.27.1 - golang.org/x/mod v0.33.0 + golang.org/x/mod v0.34.0 golang.org/x/oauth2 v0.34.0 golang.org/x/sync v0.20.0 google.golang.org/api v0.257.0 diff --git a/packages/shared/go.sum b/packages/shared/go.sum index a58712d7d6..c4e534ef81 100644 --- a/packages/shared/go.sum +++ b/packages/shared/go.sum @@ -1023,8 +1023,7 @@ golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= -golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= +golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= diff --git a/tests/integration/go.mod b/tests/integration/go.mod index a0e9a6bb59..7a4019b2de 100644 --- a/tests/integration/go.mod +++ b/tests/integration/go.mod @@ -25,7 +25,7 @@ require ( github.com/e2b-dev/infra/packages/envd v0.0.0-00010101000000-000000000000 github.com/e2b-dev/infra/packages/shared v0.0.0 github.com/google/uuid v1.6.0 - github.com/jackc/pgx/v5 v5.7.5 + github.com/jackc/pgx/v5 v5.9.1 github.com/oapi-codegen/runtime v1.1.1 github.com/stretchr/testify v1.11.1 golang.org/x/sync v0.20.0 @@ -176,7 +176,7 @@ require ( go.uber.org/zap v1.27.1 // indirect golang.org/x/arch v0.18.0 // indirect golang.org/x/crypto v0.48.0 // indirect - 
golang.org/x/mod v0.33.0 // indirect + golang.org/x/mod v0.34.0 // indirect golang.org/x/net v0.50.0 // indirect golang.org/x/sys v0.41.0 // indirect golang.org/x/term v0.40.0 // indirect diff --git a/tests/integration/go.sum b/tests/integration/go.sum index 83d28a5089..a26fb9f43a 100644 --- a/tests/integration/go.sum +++ b/tests/integration/go.sum @@ -182,8 +182,7 @@ github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsI github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= -github.com/jackc/pgx/v5 v5.7.5 h1:JHGfMnQY+IEtGM63d+NGMjoRpysB2JBwDr5fsngwmJs= -github.com/jackc/pgx/v5 v5.7.5/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= +github.com/jackc/pgx/v5 v5.9.1 h1:uwrxJXBnx76nyISkhr33kQLlUqjv7et7b9FjCen/tdc= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jellydator/ttlcache/v3 v3.4.0 h1:YS4P125qQS0tNhtL6aeYkheEaB/m8HCqdMMP4mnWdTY= @@ -447,8 +446,7 @@ golang.org/x/exp v0.0.0-20260212183809-81e46e3db34a/go.mod h1:K79w1Vqn7PoiZn+TkN golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.33.0 h1:tHFzIWbBifEmbwtGz65eaWyGiGZatSrT9prnU8DbVL8= -golang.org/x/mod v0.33.0/go.mod h1:swjeQEj+6r7fODbD2cqrnje9PnziFuw4bmLbBZFrQ5w= +golang.org/x/mod v0.34.0 h1:xIHgNUUnW6sYkcM5Jleh05DvLOtwc6RitGHbDk4akRI= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= From c98878f16605bd02d3f24851bcb8092885e51661 Mon Sep 17 00:00:00 2001 From: ben-fornefeld Date: Wed, 1 Apr 2026 19:17:49 -0700 Subject: [PATCH 19/20] chore: fix lint --- .../dashboard-api/internal/backgroundworker/auth_user_sync.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/dashboard-api/internal/backgroundworker/auth_user_sync.go b/packages/dashboard-api/internal/backgroundworker/auth_user_sync.go index b962bec78b..f999926ae2 100644 --- a/packages/dashboard-api/internal/backgroundworker/auth_user_sync.go +++ b/packages/dashboard-api/internal/backgroundworker/auth_user_sync.go @@ -36,7 +36,7 @@ type AuthUserSyncWorker struct { } func NewAuthUserSyncWorker(mainDB *sqlcdb.Client, l logger.Logger) *AuthUserSyncWorker { - jobsCounter, err := otel.Meter("github.com/e2b-dev/infra/packages/dashboard-api/internal/backgroundworker") + jobsCounter, err := otel.Meter("dashboard-api.backgroundworker.auth_user_sync").Int64Counter( "dashboard_api.auth_user_sync.jobs_total", metric.WithDescription("Total auth user sync jobs by operation and result."), metric.WithUnit("{job}"), From e314b9604456f92b77b0003faf6b1fe4cbe4adb4 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Thu, 2 Apr 2026 02:22:48 +0000 Subject: [PATCH 20/20] chore: auto-commit generated changes --- .../dashboard-api/internal/backgroundworker/auth_user_sync.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/packages/dashboard-api/internal/backgroundworker/auth_user_sync.go b/packages/dashboard-api/internal/backgroundworker/auth_user_sync.go index f999926ae2..b962bec78b 100644 --- a/packages/dashboard-api/internal/backgroundworker/auth_user_sync.go +++ b/packages/dashboard-api/internal/backgroundworker/auth_user_sync.go @@ -36,7 +36,7 @@ type AuthUserSyncWorker 
struct { } func NewAuthUserSyncWorker(mainDB *sqlcdb.Client, l logger.Logger) *AuthUserSyncWorker { - jobsCounter, err := otel.Meter("dashboard-api.backgroundworker.auth_user_sync").Int64Counter( + jobsCounter, err := otel.Meter("github.com/e2b-dev/infra/packages/dashboard-api/internal/backgroundworker") "dashboard_api.auth_user_sync.jobs_total", metric.WithDescription("Total auth user sync jobs by operation and result."), metric.WithUnit("{job}"),