Commit b92948d

backup: remove old incremental backup location lookup
In pre-22.2 backups, incremental backups were written to the same directory as the full backup. That default was removed in 22.2, but logic was kept in place for backwards compatibility so that later versions could still restore pre-22.2 backups. Now that we are on 25.4, many major versions past the removal of the old default location, the legacy logic is being removed to facilitate the development of backup indexes.

Epic: None

Release note: None
1 parent 52d5277 commit b92948d
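For illustration, a hypothetical test sketch of the user-facing consequence (not part of this commit; the test name is invented, and it reuses the package's existing helpers such as backupRestoreTestSetup and sqlDB that appear in the deleted tests below): incrementals written next to the full backup are no longer discovered automatically, so such a chain must be restored with an explicit incremental_location.

// Hypothetical sketch, not in this commit: restoring a chain laid out the
// pre-22.2 way now requires naming the incremental location explicitly.
func TestRestoreOldLayoutNeedsExplicitIncrementalLocation(t *testing.T) {
	defer leaktest.AfterTest(t)()
	defer log.Scope(t).Close(t)

	const numAccounts = 1
	_, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
	defer cleanupFn()

	base := `'nodelocal://1/full/'`
	sqlDB.Exec(t, `CREATE DATABASE fkdb`)
	sqlDB.Exec(t, `CREATE TABLE fkdb.fk (ind INT)`)
	sqlDB.Exec(t, fmt.Sprintf("BACKUP DATABASE fkdb INTO %s", base))

	// Simulate the old layout: write the incremental into the same directory
	// as the full backup by passing the collection as incremental_location.
	sqlDB.Exec(t, `INSERT INTO fkdb.fk (ind) VALUES (1)`)
	sqlDB.Exec(t, fmt.Sprintf(
		"BACKUP DATABASE fkdb INTO LATEST IN %s WITH incremental_location=%s", base, base))

	// With the old-location lookup removed, the restore must be told where the
	// incrementals live; it no longer falls back to the full backup's directory.
	sqlDB.Exec(t, fmt.Sprintf(
		"RESTORE DATABASE fkdb FROM LATEST IN %s WITH new_db_name = 'fkdb_old_layout', incremental_location=%s",
		base, base))
}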

File tree

3 files changed: +2 -203 lines changed

pkg/backup/backup_test.go

Lines changed: 0 additions & 112 deletions
@@ -9958,118 +9958,6 @@ func TestUserfileNormalizationIncrementalShowBackup(t *testing.T) {
 		sqlDB.Exec(t, query)
 	}
 
-// TestBackupRestoreOldIncrementalDefaults tests that a call to restore
-// will correctly load an incremental backup in the old "default" directory.
-// That old default is literally just "the same directory as the full backup",
-// so write that manually with the `incremental_location` option then check that
-// a read without that option contains the same data.
-func TestBackupRestoreOldIncrementalDefault(t *testing.T) {
-	defer leaktest.AfterTest(t)()
-	defer log.Scope(t).Close(t)
-
-	skip.UnderRace(t, "multinode cluster setup times out under race, likely due to resource starvation.")
-
-	const numAccounts = 1
-	_, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, multiNode, numAccounts, InitManualReplication)
-	defer cleanupFn()
-
-	const c1, c2, c3 = `nodelocal://1/full/`, `nodelocal://1/full/`, `nodelocal://2/full/`
-
-	// Deliberately the same. We're simulating an incremental backup in the old
-	// default directory, i.e. the top-level collection directory.
-	const i1, i2, i3 = `nodelocal://1/full/`, `nodelocal://1/full/`, `nodelocal://2/full/`
-
-	collections := []string{
-		fmt.Sprintf("'%s?COCKROACH_LOCALITY=%s'", c1, url.QueryEscape("default")),
-		fmt.Sprintf("'%s?COCKROACH_LOCALITY=%s'", c2, url.QueryEscape("dc=dc1")),
-		fmt.Sprintf("'%s?COCKROACH_LOCALITY=%s'", c3, url.QueryEscape("dc=dc2")),
-	}
-
-	incrementals := []string{
-		fmt.Sprintf("'%s?COCKROACH_LOCALITY=%s'", i1, url.QueryEscape("default")),
-		fmt.Sprintf("'%s?COCKROACH_LOCALITY=%s'", i2, url.QueryEscape("dc=dc1")),
-		fmt.Sprintf("'%s?COCKROACH_LOCALITY=%s'", i3, url.QueryEscape("dc=dc2")),
-	}
-	tests := []struct {
-		dest []string
-		inc  []string
-	}{{dest: []string{collections[0]}, inc: []string{incrementals[0]}},
-		{dest: collections, inc: incrementals}}
-
-	for _, br := range tests {
-		dest := strings.Join(br.dest, ", ")
-		inc := strings.Join(br.inc, ", ")
-
-		if len(br.dest) > 1 {
-			dest = "(" + dest + ")"
-			inc = "(" + inc + ")"
-		}
-		// create db
-		sqlDB.Exec(t, `CREATE DATABASE fkdb`)
-		sqlDB.Exec(t, `CREATE TABLE fkdb.fk (ind INT)`)
-
-		for i := 0; i < 10; i++ {
-			sqlDB.Exec(t, `INSERT INTO fkdb.fk (ind) VALUES ($1)`, i)
-		}
-		fb := fmt.Sprintf("BACKUP DATABASE fkdb INTO %s", dest)
-		sqlDB.Exec(t, fb)
-
-		sqlDB.Exec(t, `INSERT INTO fkdb.fk (ind) VALUES ($1)`, 200)
-
-		sib := fmt.Sprintf("BACKUP DATABASE fkdb INTO LATEST IN %s WITH incremental_location=%s", dest, inc)
-		sqlDB.Exec(t, sib)
-
-		sir := fmt.Sprintf("RESTORE DATABASE fkdb FROM LATEST IN %s WITH new_db_name = 'inc_fkdb'", dest)
-		sqlDB.Exec(t, sir)
-
-		sqlDB.CheckQueryResults(t, `SELECT * FROM inc_fkdb.fk`, sqlDB.QueryStr(t, `SELECT * FROM fkdb.fk`))
-
-		sqlDB.Exec(t, "DROP DATABASE fkdb")
-		sqlDB.Exec(t, "DROP DATABASE inc_fkdb;")
-	}
-}
-
-func TestBackupRestoreErrorsOnBothDefaultsPopulated(t *testing.T) {
-	defer leaktest.AfterTest(t)()
-	defer log.Scope(t).Close(t)
-
-	const numAccounts = 1
-	_, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
-	defer cleanupFn()
-
-	base := `'nodelocal://1/full/'`
-	oldInc := base
-	newInc := `'nodelocal://1/full/incrementals'`
-
-	// create db
-	sqlDB.Exec(t, `CREATE DATABASE fkdb`)
-	sqlDB.Exec(t, `CREATE TABLE fkdb.fk (ind INT)`)
-
-	for i := 0; i < 10; i++ {
-		sqlDB.Exec(t, `INSERT INTO fkdb.fk (ind) VALUES ($1)`, i)
-	}
-	fb := fmt.Sprintf("BACKUP DATABASE fkdb INTO %s", base)
-	sqlDB.Exec(t, fb)
-
-	sibOld := fmt.Sprintf("BACKUP DATABASE fkdb INTO LATEST IN %s WITH incremental_location=%s", base, oldInc)
-	sqlDB.Exec(t, sibOld)
-
-	sibNew := fmt.Sprintf("BACKUP DATABASE fkdb INTO LATEST IN %s WITH incremental_location=%s", base, newInc)
-	sqlDB.Exec(t, sibNew)
-
-	irDefault := fmt.Sprintf("RESTORE DATABASE fkdb FROM LATEST IN %s WITH new_db_name = 'trad_fkdb'", base)
-	sqlDB.ExpectErr(t, "pq: Incremental layers found in both old and new default locations. "+
-		"Please choose a location manually with the `incremental_location` parameter.", irDefault)
-
-	irOld := fmt.Sprintf("RESTORE DATABASE fkdb FROM LATEST IN %s WITH new_db_name = 'trad_fkdb_old_default', "+
-		"incremental_location=%s", base, base)
-	sqlDB.Exec(t, irOld)
-
-	irNew := fmt.Sprintf("RESTORE DATABASE fkdb FROM LATEST IN %s WITH new_db_name = 'trad_fkdb_new_default', "+
-		"incremental_location=%s", base, newInc)
-	sqlDB.Exec(t, irNew)
-}
-
 // TestBackupRestoreSeparateExplicitIsDefault tests that a backup/restore round
 // trip using the 'incremental_location' parameter restores the same db as a BR
 // round trip without the parameter, even when that location is in fact the default.

pkg/backup/backupdest/backup_destination_test.go

Lines changed: 1 addition & 61 deletions
@@ -133,10 +133,6 @@ func TestBackupRestoreResolveDestination(t *testing.T) {
 	inc5Time := inc4Time.Add(time.Minute * 30)
 	inc6Time := inc5Time.Add(time.Minute * 30)
 	inc7Time := inc6Time.Add(time.Minute * 30)
-	full3Time := inc7Time.Add(time.Minute * 30)
-	inc8Time := full3Time.Add(time.Minute * 30)
-	inc9Time := inc8Time.Add(time.Minute * 30)
-	full4Time := inc9Time.Add(time.Minute * 30)
 
 	// firstBackupChain is maintained throughout the tests as the history of
 	// backups that were taken based on the initial full backup.
@@ -155,6 +151,7 @@ func TestBackupRestoreResolveDestination(t *testing.T) {
 	testCollectionBackup := func(t *testing.T, backupTime time.Time,
 		expectedDefault, expectedSuffix, expectedIncDir string, expectedPrevBackups []string,
 		appendToLatest bool, subdir string, incrementalTo []string) {
+		t.Helper()
 
 		endTime := hlc.Timestamp{WallTime: backupTime.UnixNano()}
 
@@ -335,63 +332,6 @@ func TestBackupRestoreResolveDestination(t *testing.T) {
 			true /* intoLatest */, expectedSubdir, customIncrementalTo)
 		writeManifest(t, expectedDefault, inc7Time)
 	}
-
-	// A new full backup: BACKUP INTO collection
-	var backup3Location string
-	{
-		expectedSuffix := "/2020/12/25-103000.00"
-		expectedIncDir := ""
-		expectedDefault := fmt.Sprintf("nodelocal://1/%s%s?AUTH=implicit", t.Name(), expectedSuffix)
-		backup3Location = expectedDefault
-
-		testCollectionBackup(t, full3Time,
-			expectedDefault, expectedSuffix, expectedIncDir, []string(nil),
-			false /* intoLatest */, noExplicitSubDir, noIncrementalStorage)
-		writeManifest(t, expectedDefault, full3Time)
-		// We also wrote a new full backup, so let's update the latest.
-		writeLatest(t, collectionLoc, expectedSuffix)
-	}
-
-	// A remote incremental into the third full backup: BACKUP INTO LATEST
-	// IN collection, BUT with a trick. Write a (fake) incremental backup
-	// to the old directory, to be sure that subsequent incremental backups
-	// go there as well (and not the newer incrementals/ subdir.)
-	{
-		expectedSuffix := "/2020/12/25-103000.00"
-		expectedIncDir := "/20201225/110000.00-20201225-103000.00"
-
-		// Writes the (fake) incremental backup.
-		oldStyleDefault := fmt.Sprintf("nodelocal://1/%s%s%s?AUTH=implicit",
-			t.Name(),
-			expectedSuffix, expectedIncDir)
-		writeManifest(t, oldStyleDefault, inc8Time)
-
-		expectedSuffix = "/2020/12/25-103000.00"
-		expectedIncDir = "/20201225/113000.00-20201225-110000.00"
-		expectedSubdir := expectedSuffix
-		expectedDefault := fmt.Sprintf("nodelocal://1/%s%s%s?AUTH=implicit",
-			t.Name(),
-			expectedSuffix, expectedIncDir)
-
-		testCollectionBackup(t, inc9Time,
-			expectedDefault, expectedSuffix, expectedIncDir, []string{backup3Location, oldStyleDefault},
-			true /* intoLatest */, expectedSubdir, noIncrementalStorage)
-		writeManifest(t, expectedDefault, inc9Time)
-	}
-
-	// A new full backup: BACKUP INTO collection
-	{
-		expectedSuffix := "/2020/12/25-120000.00"
-		expectedIncDir := ""
-		expectedDefault := fmt.Sprintf("nodelocal://1/%s%s?AUTH=implicit", t.Name(), expectedSuffix)
-
-		testCollectionBackup(t, full4Time,
-			expectedDefault, expectedSuffix, expectedIncDir, []string(nil),
-			false /* intoLatest */, noExplicitSubDir, noIncrementalStorage)
-		writeManifest(t, expectedDefault, full4Time)
-		// We also wrote a new full backup, so let's update the latest.
-		writeLatest(t, collectionLoc, expectedSuffix)
-	}
 	})
 	})
 }

pkg/backup/backupdest/incrementals.go

Lines changed: 1 addition & 30 deletions
@@ -196,45 +196,16 @@ func ResolveIncrementalsBackupLocation(
 		return incPaths, nil
 	}
 
-	resolvedIncrementalsBackupLocationOld, err := backuputils.AppendPaths(fullBackupCollections, subdir)
-	if err != nil {
-		return nil, err
-	}
-
-	// We can have >1 full backup collection specified, but each will have an
-	// incremental layer iff all of them do. So it suffices to check only the
-	// first.
-	// Check we can read from this location, though we don't need the backups here.
-	prevOld, err := backupsFromLocation(ctx, user, execCfg, resolvedIncrementalsBackupLocationOld[0])
-	if err != nil {
-		return nil, err
-	}
-
 	resolvedIncrementalsBackupLocation, err := backuputils.AppendPaths(fullBackupCollections, backupbase.DefaultIncrementalsSubdir, subdir)
 	if err != nil {
 		return nil, err
 	}
 
-	prev, err := backupsFromLocation(ctx, user, execCfg, resolvedIncrementalsBackupLocation[0])
+	_, err = backupsFromLocation(ctx, user, execCfg, resolvedIncrementalsBackupLocation[0])
 	if err != nil {
 		return nil, err
 	}
 
-	// TODO(bardin): This algorithm divides "destination resolution" and "actual backup lookup" for historical reasons,
-	// but this doesn't quite make sense now that destination resolution depends on backup lookup.
-	// Try to figure out a clearer way to organize this.
-	if len(prevOld) > 0 && len(prev) > 0 {
-		return nil, errors.New(
-			"Incremental layers found in both old and new default locations. " +
-				"Please choose a location manually with the `incremental_location` parameter.")
-	}
-
-	// If we have backups in the old default location, continue to use the old location.
-	if len(prevOld) > 0 {
-		return resolvedIncrementalsBackupLocationOld, nil
-	}
-
-	// Otherwise, use the new location.
 	return resolvedIncrementalsBackupLocation, nil
 }
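For reference, a minimal standalone sketch of what the remaining code path resolves to (an illustration only, not the CockroachDB implementation; resolveIncrementalsDefault and its use of net/url are invented here): when no explicit incremental_location is given, each collection URI simply gains the default "incrementals" subdirectory plus the full backup's subdir.

// Illustration only: mimics the surviving branch of
// ResolveIncrementalsBackupLocation, which appends the default
// "incrementals" subdirectory and the full backup's subdir to every
// collection URI. The helper below is hypothetical, not the real API.
package main

import (
	"fmt"
	"net/url"
	"path"
)

func resolveIncrementalsDefault(collections []string, subdir string) ([]string, error) {
	out := make([]string, 0, len(collections))
	for _, c := range collections {
		u, err := url.Parse(c)
		if err != nil {
			return nil, err
		}
		// Join the collection path with the default subdirectory and the
		// full backup's date-based subdir.
		u.Path = path.Join(u.Path, "incrementals", subdir)
		out = append(out, u.String())
	}
	return out, nil
}

func main() {
	resolved, err := resolveIncrementalsDefault(
		[]string{"nodelocal://1/full"}, "/2020/12/25-103000.00")
	if err != nil {
		panic(err)
	}
	// Prints: [nodelocal://1/full/incrementals/2020/12/25-103000.00]
	fmt.Println(resolved)
}

An explicit incremental_location appears to bypass this resolution entirely via the early return kept at the top of the hunk, which is why explicitly-specified old-layout locations still restore.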
