Skip to content

Commit 2715478

Browse files
committed
testing: add lock/locktesting
1 parent ebcf1ac commit 2715478

File tree

2 files changed

+16
-16
lines changed

2 files changed

+16
-16
lines changed

internal/testing/integration/locking/postgres_table_locking_test.go

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -9,9 +9,9 @@ import (
99
"time"
1010

1111
"github.com/pressly/goose/v3"
12-
"github.com/pressly/goose/v3/internal/testing/locktest"
1312
"github.com/pressly/goose/v3/internal/testing/testdb"
1413
"github.com/pressly/goose/v3/lock"
14+
"github.com/pressly/goose/v3/lock/locktesting"
1515
"github.com/pressly/goose/v3/testdata"
1616
"github.com/stretchr/testify/require"
1717
)
@@ -43,7 +43,7 @@ func TestConcurrentTableLocking(t *testing.T) {
4343
return locker
4444
}
4545

46-
locktest.TestConcurrentLocking(t, db, newLocker, 1*time.Second)
46+
locktesting.TestConcurrentLocking(t, db, newLocker, 1*time.Second)
4747
}
4848

4949
func TestSequentialTableLocking(t *testing.T) {
@@ -119,7 +119,7 @@ func TestLockerImplementations(t *testing.T) {
119119
// Use the same lock ID for all providers so they compete for the same table row
120120
sharedLockID := rand.Int64()
121121

122-
locktest.TestProviderLocking(t, func(t *testing.T) *goose.Provider {
122+
locktesting.TestProviderLocking(t, func(t *testing.T) *goose.Provider {
123123
t.Helper()
124124

125125
// Create a UNIQUE table-based locker instance per provider, but same lock ID
@@ -153,7 +153,7 @@ func TestLockerImplementations(t *testing.T) {
153153
)
154154
require.NoError(t, err)
155155

156-
locktest.TestProviderLocking(t, func(t *testing.T) *goose.Provider {
156+
locktesting.TestProviderLocking(t, func(t *testing.T) *goose.Provider {
157157
t.Helper()
158158

159159
p, err := goose.NewProvider(
@@ -176,7 +176,7 @@ func TestLockerImplementations(t *testing.T) {
176176
// Use the same lock ID for all providers so they compete for the same advisory lock
177177
sharedLockID := rand.Int64()
178178

179-
locktest.TestProviderLocking(t, func(t *testing.T) *goose.Provider {
179+
locktesting.TestProviderLocking(t, func(t *testing.T) *goose.Provider {
180180
t.Helper()
181181

182182
// Each provider gets a UNIQUE session locker instance, but same lock ID
@@ -211,7 +211,7 @@ func TestLockerImplementations(t *testing.T) {
211211
)
212212
require.NoError(t, err)
213213

214-
locktest.TestProviderLocking(t, func(t *testing.T) *goose.Provider {
214+
locktesting.TestProviderLocking(t, func(t *testing.T) *goose.Provider {
215215
t.Helper()
216216

217217
p, err := goose.NewProvider(
Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,9 @@
1-
package locktest
1+
package locktesting
22

33
import (
44
"context"
55
"database/sql"
66
"fmt"
7-
"log/slog"
87
"sync"
98
"testing"
109
"time"
@@ -56,7 +55,7 @@ func TestProviderLocking(
5655

5756
for i := range count {
5857
g.Go(func() error {
59-
ctx := t.Context()
58+
ctx := context.Background()
6059
migrationResults, err := providers[i].Up(ctx)
6160
if err != nil {
6261
return err
@@ -123,13 +122,14 @@ func TestConcurrentLocking(
123122
lockTimeout time.Duration,
124123
) {
125124
t.Helper()
125+
ctx := context.Background()
126126

127127
// TODO(mf): I wonder if there's a better way to do logging in tests that conditionally enables
128128
// it. Maybe using testing.T.Log? But that doesn't have levels. Maybe use a global flag to
129129
// enable debug logging in tests?
130130

131131
// logger := slog.New(slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug}))
132-
logger := slog.New(slog.DiscardHandler)
132+
// logger := slog.New(slog.DiscardHandler)
133133

134134
// Number of concurrent lockers to test
135135
const count = 5
@@ -149,27 +149,27 @@ func TestConcurrentLocking(
149149
go func() {
150150
defer wg.Done()
151151

152-
ctx, cancel := context.WithTimeout(t.Context(), lockTimeout)
152+
ctx, cancel := context.WithTimeout(ctx, lockTimeout)
153153
defer cancel()
154154

155155
// Try to acquire the lock
156156
if err := lockers[i].Lock(ctx, db); err != nil {
157-
logger.Debug("Locker failed to acquire lock", slog.Int("locker", i), slog.String("error", err.Error()))
157+
// logger.Debug("Locker failed to acquire lock", slog.Int("locker", i), slog.String("error", err.Error()))
158158
return
159159
}
160160

161161
successCh <- i
162-
logger.Debug("Locker acquired lock", slog.Int("locker", i))
162+
// logger.Debug("Locker acquired lock", slog.Int("locker", i))
163163

164164
// Hold the lock long enough for all other goroutines to exhaust their retries. This
165165
// ensures only ONE locker succeeds in the concurrent test
166166
time.Sleep(lockTimeout * 2)
167167

168168
// Release the lock
169-
if err := lockers[i].Unlock(t.Context(), db); err != nil {
169+
if err := lockers[i].Unlock(ctx, db); err != nil {
170170
t.Errorf("Locker %d failed to release lock: %v", i, err)
171171
} else {
172-
logger.Debug("Locker released lock", slog.Int("locker", i))
172+
// logger.Debug("Locker released lock", slog.Int("locker", i))
173173
}
174174
}()
175175
}
@@ -195,5 +195,5 @@ func TestConcurrentLocking(
195195
}
196196

197197
require.Equal(t, 1, len(successful), "Exactly one locker should acquire the lock")
198-
logger.Debug("Concurrent locking test passed", slog.Int("winning_locker", successful[0]))
198+
// logger.Debug("Concurrent locking test passed", slog.Int("winning_locker", successful[0]))
199199
}

0 commit comments

Comments (0)