
Commit bcbb5bd

craig[bot] and kev-cao committed
Merge #144644

144644: backup: prevent compactions on locality aware backups r=msbutler a=kev-cao

While compactions *can* compact backups that have been partitioned by locality, the end product itself is not partitioned. Until that support is implemented, we should prevent compactions on locality aware backups.

Epic: None

Release note: Backup compaction does not yet support locality aware backups.

Co-authored-by: Kevin Cao <[email protected]>
2 parents 6d76d83 + de51c91 commit bcbb5bd
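
For context, a locality aware backup is one whose destination is split across several storage URIs, each tagged with a COCKROACH_LOCALITY query parameter ("default" plus one per locality filter); data from each node lands in the URI whose locality tag matches it. The sketch below builds such a collection clause the same way the now-removed TestBackupCompactionLocalityAware did; the nodelocal URIs and locality values are simply the ones from that test, shown as an illustration rather than anything this commit adds.

package main

import (
	"fmt"
	"net/url"
	"strings"
)

func main() {
	// One destination URI per locality, each tagged via COCKROACH_LOCALITY:
	// "default" receives data from nodes matching no more specific filter,
	// while "dc=dc2" and "region=west" receive data from matching nodes.
	collectionURIs := strings.Join([]string{
		fmt.Sprintf("'nodelocal://1/backup?COCKROACH_LOCALITY=%s'", url.QueryEscape("default")),
		fmt.Sprintf("'nodelocal://2/backup?COCKROACH_LOCALITY=%s'", url.QueryEscape("dc=dc2")),
		fmt.Sprintf("'nodelocal://3/backup?COCKROACH_LOCALITY=%s'", url.QueryEscape("region=west")),
	}, ", ")

	// A backup taken this way is partitioned by locality, which is exactly
	// the kind of trigger backup the new check refuses to compact.
	fmt.Printf("BACKUP INTO (%s)\n", collectionURIs)
}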

File tree: 2 files changed, +12 -83 lines changed


pkg/backup/compaction_job.go

Lines changed: 2 additions & 0 deletions

@@ -84,6 +84,8 @@ func maybeStartCompactionJob(
 		return 0, errors.New("custom incremental storage location not supported for compaction")
 	case len(triggerJob.SpecificTenantIds) != 0 || triggerJob.IncludeAllSecondaryTenants:
 		return 0, errors.New("backups of tenants not supported for compaction")
+	case len(triggerJob.URIsByLocalityKV) != 0:
+		return 0, errors.New("locality aware backups not supported for compaction")
 	}

 	env := scheduledjobs.ProdJobSchedulerEnv
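
As a minimal, self-contained sketch of the guard above (not the real job code): the stand-in struct below mirrors only the two jobspb.BackupDetails fields this commit touches, and checkCompactable is a hypothetical helper that reproduces the shape of the new switch case in maybeStartCompactionJob.

package main

import (
	"errors"
	"fmt"
)

// triggerDetails stands in for jobspb.BackupDetails, keeping only the fields
// referenced in this commit; the real type carries much more.
type triggerDetails struct {
	ScheduleID       int64
	URIsByLocalityKV map[string]string // locality KV -> destination URI
}

// checkCompactable mirrors the new case: a trigger backup partitioned by
// locality is rejected before a compaction job is ever created.
func checkCompactable(d triggerDetails) error {
	if len(d.URIsByLocalityKV) != 0 {
		return errors.New("locality aware backups not supported for compaction")
	}
	return nil
}

func main() {
	// The same details as the test case added to
	// TestBackupCompactionUnsupportedOptions further down.
	d := triggerDetails{
		ScheduleID: 1,
		URIsByLocalityKV: map[string]string{
			"region=us-east-2": "nodelocal://1/backup",
		},
	}
	fmt.Println(checkCompactable(d)) // locality aware backups not supported for compaction
}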

pkg/backup/compaction_test.go

Lines changed: 10 additions & 83 deletions

@@ -10,7 +10,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"math/rand"
-	"net/url"
 	"strconv"
 	"strings"
 	"sync/atomic"
@@ -28,7 +27,6 @@ import (
 	"github.com/cockroachdb/cockroach/pkg/sql"
 	"github.com/cockroachdb/cockroach/pkg/testutils"
 	"github.com/cockroachdb/cockroach/pkg/testutils/jobutils"
-	"github.com/cockroachdb/cockroach/pkg/testutils/skip"
 	"github.com/cockroachdb/cockroach/pkg/testutils/sqlutils"
 	"github.com/cockroachdb/cockroach/pkg/util/hlc"
 	"github.com/cockroachdb/cockroach/pkg/util/leaktest"
@@ -419,87 +417,6 @@ crdb_internal.json_to_pb(
 	// iterator, add tests for dropped tables/indexes.
 }

-func TestBackupCompactionLocalityAware(t *testing.T) {
-	defer leaktest.AfterTest(t)()
-	defer log.Scope(t).Close(t)
-	skip.UnderDuress(t, "node startup is slow")
-
-	tempDir, tempDirCleanup := testutils.TempDir(t)
-	defer tempDirCleanup()
-	tc, db, cleanupDB := backupRestoreTestSetupEmpty(
-		t, multiNode, tempDir, InitManualReplication,
-		base.TestClusterArgs{
-			ServerArgsPerNode: map[int]base.TestServerArgs{
-				0: {
-					Locality: roachpb.Locality{Tiers: []roachpb.Tier{
-						{Key: "region", Value: "west"},
-						{Key: "az", Value: "az1"},
-						{Key: "dc", Value: "dc1"},
-					}},
-				},
-				1: {
-					Locality: roachpb.Locality{Tiers: []roachpb.Tier{
-						{Key: "region", Value: "east"},
-						{Key: "az", Value: "az1"},
-						{Key: "dc", Value: "dc2"},
-					}},
-				},
-				2: {
-					Locality: roachpb.Locality{Tiers: []roachpb.Tier{
-						{Key: "region", Value: "east"},
-						{Key: "az", Value: "az2"},
-						{Key: "dc", Value: "dc3"},
-					}},
-				},
-			},
-		},
-	)
-	defer cleanupDB()
-	collectionURIs := strings.Join([]string{
-		fmt.Sprintf(
-			"'nodelocal://1/backup?COCKROACH_LOCALITY=%s'",
-			url.QueryEscape("default"),
-		),
-		fmt.Sprintf(
-			"'nodelocal://2/backup?COCKROACH_LOCALITY=%s'",
-			url.QueryEscape("dc=dc2"),
-		),
-		fmt.Sprintf(
-			"'nodelocal://3/backup?COCKROACH_LOCALITY=%s'",
-			url.QueryEscape("region=west"),
-		),
-	}, ", ")
-	db.Exec(t, "CREATE TABLE foo (a INT, b INT)")
-	db.Exec(t, "INSERT INTO foo VALUES (1, 1)")
-	start := getTime(t)
-	db.Exec(
-		t,
-		fmt.Sprintf("BACKUP INTO (%s) AS OF SYSTEM TIME '%d'", collectionURIs, start),
-	)
-
-	db.Exec(t, "INSERT INTO foo VALUES (2, 2)")
-	db.Exec(
-		t,
-		fmt.Sprintf("BACKUP INTO LATEST IN (%s)", collectionURIs),
-	)
-
-	db.Exec(t, "INSERT INTO foo VALUES (3, 3)")
-	end := getTime(t)
-	db.Exec(
-		t,
-		fmt.Sprintf("BACKUP INTO LATEST IN (%s) AS OF SYSTEM TIME '%d'", collectionURIs, end),
-	)
-	compactionBuiltin := "SELECT crdb_internal.backup_compaction(ARRAY[%s], '%s', '', %d::DECIMAL, %d::DECIMAL)"
-
-	var backupPath string
-	db.QueryRow(t, "SHOW BACKUPS IN 'nodelocal://1/backup'").Scan(&backupPath)
-	row := db.QueryRow(t, fmt.Sprintf(compactionBuiltin, collectionURIs, backupPath, start, end))
-	var jobID jobspb.JobID
-	row.Scan(&jobID)
-	waitForSuccessfulJob(t, tc, jobID)
-	validateCompactedBackupForTables(t, db, []string{"foo"}, collectionURIs, start, end, 2)
-}
-
 func TestScheduledBackupCompaction(t *testing.T) {
 	defer leaktest.AfterTest(t)()
 	defer log.Scope(t).Close(t)
@@ -681,6 +598,16 @@ func TestBackupCompactionUnsupportedOptions(t *testing.T) {
 			},
 			"backups of tenants not supported for compaction",
 		},
+		{
+			"locality aware backups not supported",
+			jobspb.BackupDetails{
+				ScheduleID: 1,
+				URIsByLocalityKV: map[string]string{
+					"region=us-east-2": "nodelocal://1/backup",
+				},
+			},
+			"locality aware backups not supported for compaction",
+		},
 	}

 	for _, tc := range testcases {
