Skip to content

Commit 6a107a5

Browse files
committed
roachtest: compact backups in backup restore roundtrip
This patch enables the conventional restore roundtrip tests to run compacted backups. Once the limit on the number of incremental layers that online restore supports is lifted, we'll enable compacted backups to run on online restore as well.

Epic: none

Release note: none
1 parent 9f899e1 commit 6a107a5

File tree

2 files changed

+81
-15
lines changed

2 files changed

+81
-15
lines changed

pkg/cmd/roachtest/tests/backup_restore_roundtrip.go

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -122,7 +122,8 @@ func backupRestoreRoundTrip(
122122

123123
return conn, err
124124
}
125-
testUtils, err := newCommonTestUtils(ctx, t, c, connectFunc, c.CRDBNodes(), withMock(sp.mock), withOnlineRestore(sp.onlineRestore))
125+
// TODO (msbutler): enable compaction for online restore test once inc layer limit is increased.
126+
testUtils, err := newCommonTestUtils(ctx, t, c, connectFunc, c.CRDBNodes(), withMock(sp.mock), withOnlineRestore(sp.onlineRestore), withCompaction(!sp.onlineRestore))
126127
if err != nil {
127128
return err
128129
}

pkg/cmd/roachtest/tests/mixed_version_backup.go

Lines changed: 79 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -179,7 +179,7 @@ var (
179179
}
180180

181181
possibleNumIncrementalBackups = []int{
182-
1,
182+
2,
183183
3,
184184
}
185185

@@ -410,6 +410,14 @@ type (
410410
incNum int
411411
}
412412

413+
compactedBackup struct {
414+
collection backupCollection
415+
startTime string
416+
endTime string
417+
418+
fullSubdir string
419+
}
420+
413421
// labeledNodes allows us to label a set of nodes with the version
414422
// they are running, to allow for human-readable backup names
415423
labeledNodes struct {
@@ -456,6 +464,7 @@ func tableNamesWithDB(db string, tables []string) []string {
456464

457465
func (fb fullBackup) String() string { return "full" }
458466
func (ib incrementalBackup) String() string { return "incremental" }
467+
func (cb compactedBackup) String() string { return "compacted" }
459468

460469
func (rh revisionHistory) String() string {
461470
return "revision_history"
@@ -1866,6 +1875,14 @@ func (d *BackupRestoreTestDriver) runBackup(
18661875
collection = b.collection
18671876
latest = " LATEST IN"
18681877
l.Printf("creating incremental backup num %d for %s", b.incNum, collection.name)
1878+
case compactedBackup:
1879+
collection = b.collection
1880+
1881+
// This latest var is only required to comply with this driver. It has no
1882+
// effect on compacted backup construction, as the full subdir passed to the
1883+
// cmd determines the full we compact on.
1884+
latest = " LATEST IN"
1885+
l.Printf("creating compacted backup for %s from start %s to end %s", collection.name, b.startTime, b.endTime)
18691886
}
18701887

18711888
for _, opt := range collection.options {
@@ -1883,12 +1900,24 @@ func (d *BackupRestoreTestDriver) runBackup(
18831900
backupTime,
18841901
strings.Join(options, ", "),
18851902
)
1886-
l.Printf("creating %s backup via node %d: %s", bType, node, stmt)
1903+
18871904
var jobID int
1888-
if err := db.QueryRowContext(ctx, stmt).Scan(&jobID); err != nil {
1889-
return backupCollection{}, "", fmt.Errorf("error while creating %s backup %s: %w", bType, collection.name, err)
1905+
if compactData, ok := bType.(compactedBackup); ok {
1906+
backupTime = compactData.endTime
1907+
if err := db.QueryRowContext(ctx,
1908+
`SELECT crdb_internal.backup_compaction(
1909+
$1,
1910+
$2,
1911+
$3::DECIMAL, $4::DECIMAL
1912+
)`, stmt, compactData.fullSubdir, compactData.startTime, compactData.endTime).Scan(&jobID); err != nil {
1913+
return backupCollection{}, "", fmt.Errorf("error while creating %s compacted backup %s: %w", bType, collection.name, err)
1914+
}
1915+
} else {
1916+
l.Printf("creating %s backup via node %d: %s", bType, node, stmt)
1917+
if err := db.QueryRowContext(ctx, stmt).Scan(&jobID); err != nil {
1918+
return backupCollection{}, "", fmt.Errorf("error while creating %s backup %s: %w", bType, collection.name, err)
1919+
}
18901920
}
1891-
18921921
backupErr := make(chan error)
18931922
tasker.Go(func(ctx context.Context, l *logger.Logger) error {
18941923
defer close(backupErr)
@@ -2024,6 +2053,7 @@ func (d *BackupRestoreTestDriver) createBackupCollection(
20242053
isMultitenant bool,
20252054
) (*backupCollection, error) {
20262055
var collection backupCollection
2056+
backupEndTimes := make([]string, 0)
20272057
var latestIncBackupEndTime string
20282058
var fullBackupEndTime string
20292059

@@ -2038,14 +2068,12 @@ func (d *BackupRestoreTestDriver) createBackupCollection(
20382068
}); err != nil {
20392069
return nil, err
20402070
}
2071+
backupEndTimes = append(backupEndTimes, fullBackupEndTime)
20412072

20422073
// Create incremental backups.
20432074
numIncrementals := possibleNumIncrementalBackups[rng.Intn(len(possibleNumIncrementalBackups))]
20442075
if d.testUtils.mock {
2045-
numIncrementals = 1
2046-
}
2047-
if d.testUtils.onlineRestore {
2048-
numIncrementals = 0
2076+
numIncrementals = 2
20492077
}
20502078
l.Printf("creating %d incremental backups", numIncrementals)
20512079
for i := 0; i < numIncrementals; i++ {
@@ -2060,6 +2088,36 @@ func (d *BackupRestoreTestDriver) createBackupCollection(
20602088
}); err != nil {
20612089
return nil, err
20622090
}
2091+
backupEndTimes = append(backupEndTimes, latestIncBackupEndTime)
2092+
2093+
if d.testUtils.compactionEnabled && !collection.withRevisionHistory() && len(backupEndTimes) >= 3 {
2094+
// Require that endIdx - startIdx >= 2 so at least 2 inc backups are
2095+
compacted. If there are 3 backupEndTimes, the start must be the
2096+
0th. Then endIdx is always the last index for now, so the compacted
2097+
// backup gets selected to restore.
2098+
startIdx := rng.Intn(len(backupEndTimes) - 2)
2099+
endIdx := len(backupEndTimes) - 1
2100+
2101+
var fullPath string
2102+
_, db := d.testUtils.RandomDB(rng, d.roachNodes)
2103+
row := db.QueryRowContext(ctx, fmt.Sprintf(`SELECT path
2104+
FROM [SHOW BACKUPS IN '%s']
2105+
ORDER BY path DESC
2106+
LIMIT 1`, collection.uri()))
2107+
if err := row.Scan(&fullPath); err != nil {
2108+
return nil, errors.Wrapf(err, "error while getting full backup path %s", collection.name)
2109+
}
2110+
compact := compactedBackup{collection: collection, startTime: backupEndTimes[startIdx], endTime: backupEndTimes[endIdx], fullSubdir: fullPath}
2111+
if err := d.testUtils.runJobOnOneOf(ctx, l, incBackupSpec.Execute.Nodes, func() error {
2112+
var err error
2113+
collection, latestIncBackupEndTime, err = d.runBackup(
2114+
ctx, l, tasker, rng, incBackupSpec.Plan.Nodes, incBackupSpec.PauseProbability,
2115+
compact, internalSystemJobs, isMultitenant)
2116+
return err
2117+
}); err != nil {
2118+
return nil, err
2119+
}
2120+
}
20632121
}
20642122

20652123
if err := collection.maybeUseRestoreAOST(l, rng, fullBackupEndTime, latestIncBackupEndTime); err != nil {
@@ -2832,11 +2890,12 @@ func prepSchemaChangeWorkload(
28322890
}
28332891

28342892
type CommonTestUtils struct {
2835-
t test.Test
2836-
cluster cluster.Cluster
2837-
roachNodes option.NodeListOption
2838-
mock bool
2839-
onlineRestore bool
2893+
t test.Test
2894+
cluster cluster.Cluster
2895+
roachNodes option.NodeListOption
2896+
mock bool
2897+
onlineRestore bool
2898+
compactionEnabled bool
28402899

28412900
connCache struct {
28422901
mu syncutil.Mutex
@@ -2858,6 +2917,12 @@ func withOnlineRestore(or bool) commonTestOption {
28582917
}
28592918
}
28602919

2920+
func withCompaction(c bool) commonTestOption {
2921+
return func(cu *CommonTestUtils) {
2922+
cu.compactionEnabled = c
2923+
}
2924+
}
2925+
28612926
// Change the function signature
28622927
func newCommonTestUtils(
28632928
ctx context.Context,

0 commit comments

Comments
 (0)