Skip to content

Commit 63f8071

Browse files
craig[bot], bghal, and kev-cao
committed
150747: sql: unblock user table changes from long-running transactions r=bghal a=bghal sql: unblock user table changes from long-running transactions Currently, the `CREATE USER` and the `GRANT role` operations use a bumped descriptor to invalidate caches of the user table. This blocked those operations until long-running transactions using the old table version committed. This change adds special handling of version bumps to wait for visibility of the change rather than convergence across the cluster. It no longer creates (blocking) jobs for the version bumps. Epic: CRDB-49398 Fixes: #138691 Release note (sql change): The `CREATE USER` and `GRANT role` operations now wait for full-cluster visibility of the new user table version rather than blocking on convergence. 152015: backup: remove old incremental backup location lookup r=dt a=kev-cao In pre-22.2 backups, incremental backups were written to the same directory as the full backup. This was removed in 22.2, but logic was kept in place to maintain backwards compatibility and allow later versions to restore from pre-22.2 backups. However, given that we are now on 25.4 and are many major versions past the removal of the old default location, the legacy logic is now being removed to facilitate the development of backup indexes. Epic: None Release note: None Co-authored-by: Brendan Gerrity <[email protected]> Co-authored-by: Kevin Cao <[email protected]>
3 parents 8198062 + 65c953b + b92948d commit 63f8071

File tree

17 files changed

+432
-292
lines changed

17 files changed

+432
-292
lines changed

pkg/backup/backup_test.go

Lines changed: 0 additions & 112 deletions
Original file line numberDiff line numberDiff line change
@@ -9958,118 +9958,6 @@ func TestUserfileNormalizationIncrementalShowBackup(t *testing.T) {
99589958
sqlDB.Exec(t, query)
99599959
}
99609960

9961-
// TestBackupRestoreOldIncrementalDefaults tests that a call to restore
9962-
// will correctly load an incremental backup in the old "default" directory.
9963-
// That old default is literally just "the same directory as the full backup",
9964-
// so write that manually with the `incremental_location` option then check that
9965-
// a read without that option contains the same data.
9966-
func TestBackupRestoreOldIncrementalDefault(t *testing.T) {
9967-
defer leaktest.AfterTest(t)()
9968-
defer log.Scope(t).Close(t)
9969-
9970-
skip.UnderRace(t, "multinode cluster setup times out under race, likely due to resource starvation.")
9971-
9972-
const numAccounts = 1
9973-
_, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, multiNode, numAccounts, InitManualReplication)
9974-
defer cleanupFn()
9975-
9976-
const c1, c2, c3 = `nodelocal://1/full/`, `nodelocal://1/full/`, `nodelocal://2/full/`
9977-
9978-
// Deliberately the same. We're simulating an incremental backup in the old
9979-
// default directory, i.e. the top-level collection directory.
9980-
const i1, i2, i3 = `nodelocal://1/full/`, `nodelocal://1/full/`, `nodelocal://2/full/`
9981-
9982-
collections := []string{
9983-
fmt.Sprintf("'%s?COCKROACH_LOCALITY=%s'", c1, url.QueryEscape("default")),
9984-
fmt.Sprintf("'%s?COCKROACH_LOCALITY=%s'", c2, url.QueryEscape("dc=dc1")),
9985-
fmt.Sprintf("'%s?COCKROACH_LOCALITY=%s'", c3, url.QueryEscape("dc=dc2")),
9986-
}
9987-
9988-
incrementals := []string{
9989-
fmt.Sprintf("'%s?COCKROACH_LOCALITY=%s'", i1, url.QueryEscape("default")),
9990-
fmt.Sprintf("'%s?COCKROACH_LOCALITY=%s'", i2, url.QueryEscape("dc=dc1")),
9991-
fmt.Sprintf("'%s?COCKROACH_LOCALITY=%s'", i3, url.QueryEscape("dc=dc2")),
9992-
}
9993-
tests := []struct {
9994-
dest []string
9995-
inc []string
9996-
}{{dest: []string{collections[0]}, inc: []string{incrementals[0]}},
9997-
{dest: collections, inc: incrementals}}
9998-
9999-
for _, br := range tests {
10000-
dest := strings.Join(br.dest, ", ")
10001-
inc := strings.Join(br.inc, ", ")
10002-
10003-
if len(br.dest) > 1 {
10004-
dest = "(" + dest + ")"
10005-
inc = "(" + inc + ")"
10006-
}
10007-
// create db
10008-
sqlDB.Exec(t, `CREATE DATABASE fkdb`)
10009-
sqlDB.Exec(t, `CREATE TABLE fkdb.fk (ind INT)`)
10010-
10011-
for i := 0; i < 10; i++ {
10012-
sqlDB.Exec(t, `INSERT INTO fkdb.fk (ind) VALUES ($1)`, i)
10013-
}
10014-
fb := fmt.Sprintf("BACKUP DATABASE fkdb INTO %s", dest)
10015-
sqlDB.Exec(t, fb)
10016-
10017-
sqlDB.Exec(t, `INSERT INTO fkdb.fk (ind) VALUES ($1)`, 200)
10018-
10019-
sib := fmt.Sprintf("BACKUP DATABASE fkdb INTO LATEST IN %s WITH incremental_location=%s", dest, inc)
10020-
sqlDB.Exec(t, sib)
10021-
10022-
sir := fmt.Sprintf("RESTORE DATABASE fkdb FROM LATEST IN %s WITH new_db_name = 'inc_fkdb'", dest)
10023-
sqlDB.Exec(t, sir)
10024-
10025-
sqlDB.CheckQueryResults(t, `SELECT * FROM inc_fkdb.fk`, sqlDB.QueryStr(t, `SELECT * FROM fkdb.fk`))
10026-
10027-
sqlDB.Exec(t, "DROP DATABASE fkdb")
10028-
sqlDB.Exec(t, "DROP DATABASE inc_fkdb;")
10029-
}
10030-
}
10031-
10032-
func TestBackupRestoreErrorsOnBothDefaultsPopulated(t *testing.T) {
10033-
defer leaktest.AfterTest(t)()
10034-
defer log.Scope(t).Close(t)
10035-
10036-
const numAccounts = 1
10037-
_, sqlDB, _, cleanupFn := backupRestoreTestSetup(t, singleNode, numAccounts, InitManualReplication)
10038-
defer cleanupFn()
10039-
10040-
base := `'nodelocal://1/full/'`
10041-
oldInc := base
10042-
newInc := `'nodelocal://1/full/incrementals'`
10043-
10044-
// create db
10045-
sqlDB.Exec(t, `CREATE DATABASE fkdb`)
10046-
sqlDB.Exec(t, `CREATE TABLE fkdb.fk (ind INT)`)
10047-
10048-
for i := 0; i < 10; i++ {
10049-
sqlDB.Exec(t, `INSERT INTO fkdb.fk (ind) VALUES ($1)`, i)
10050-
}
10051-
fb := fmt.Sprintf("BACKUP DATABASE fkdb INTO %s", base)
10052-
sqlDB.Exec(t, fb)
10053-
10054-
sibOld := fmt.Sprintf("BACKUP DATABASE fkdb INTO LATEST IN %s WITH incremental_location=%s", base, oldInc)
10055-
sqlDB.Exec(t, sibOld)
10056-
10057-
sibNew := fmt.Sprintf("BACKUP DATABASE fkdb INTO LATEST IN %s WITH incremental_location=%s", base, newInc)
10058-
sqlDB.Exec(t, sibNew)
10059-
10060-
irDefault := fmt.Sprintf("RESTORE DATABASE fkdb FROM LATEST IN %s WITH new_db_name = 'trad_fkdb'", base)
10061-
sqlDB.ExpectErr(t, "pq: Incremental layers found in both old and new default locations. "+
10062-
"Please choose a location manually with the `incremental_location` parameter.", irDefault)
10063-
10064-
irOld := fmt.Sprintf("RESTORE DATABASE fkdb FROM LATEST IN %s WITH new_db_name = 'trad_fkdb_old_default', "+
10065-
"incremental_location=%s", base, base)
10066-
sqlDB.Exec(t, irOld)
10067-
10068-
irNew := fmt.Sprintf("RESTORE DATABASE fkdb FROM LATEST IN %s WITH new_db_name = 'trad_fkdb_new_default', "+
10069-
"incremental_location=%s", base, newInc)
10070-
sqlDB.Exec(t, irNew)
10071-
}
10072-
100739961
// TestBackupRestoreSeparateExplicitIsDefault tests that a backup/restore round
100749962
// trip using the 'incremental_location' parameter restores the same db as a BR
100759963
// round trip without the parameter, even when that location is in fact the default.

pkg/backup/backupdest/backup_destination_test.go

Lines changed: 1 addition & 61 deletions
Original file line numberDiff line numberDiff line change
@@ -133,10 +133,6 @@ func TestBackupRestoreResolveDestination(t *testing.T) {
133133
inc5Time := inc4Time.Add(time.Minute * 30)
134134
inc6Time := inc5Time.Add(time.Minute * 30)
135135
inc7Time := inc6Time.Add(time.Minute * 30)
136-
full3Time := inc7Time.Add(time.Minute * 30)
137-
inc8Time := full3Time.Add(time.Minute * 30)
138-
inc9Time := inc8Time.Add(time.Minute * 30)
139-
full4Time := inc9Time.Add(time.Minute * 30)
140136

141137
// firstBackupChain is maintained throughout the tests as the history of
142138
// backups that were taken based on the initial full backup.
@@ -155,6 +151,7 @@ func TestBackupRestoreResolveDestination(t *testing.T) {
155151
testCollectionBackup := func(t *testing.T, backupTime time.Time,
156152
expectedDefault, expectedSuffix, expectedIncDir string, expectedPrevBackups []string,
157153
appendToLatest bool, subdir string, incrementalTo []string) {
154+
t.Helper()
158155

159156
endTime := hlc.Timestamp{WallTime: backupTime.UnixNano()}
160157

@@ -335,63 +332,6 @@ func TestBackupRestoreResolveDestination(t *testing.T) {
335332
true /* intoLatest */, expectedSubdir, customIncrementalTo)
336333
writeManifest(t, expectedDefault, inc7Time)
337334
}
338-
339-
// A new full backup: BACKUP INTO collection
340-
var backup3Location string
341-
{
342-
expectedSuffix := "/2020/12/25-103000.00"
343-
expectedIncDir := ""
344-
expectedDefault := fmt.Sprintf("nodelocal://1/%s%s?AUTH=implicit", t.Name(), expectedSuffix)
345-
backup3Location = expectedDefault
346-
347-
testCollectionBackup(t, full3Time,
348-
expectedDefault, expectedSuffix, expectedIncDir, []string(nil),
349-
false /* intoLatest */, noExplicitSubDir, noIncrementalStorage)
350-
writeManifest(t, expectedDefault, full3Time)
351-
// We also wrote a new full backup, so let's update the latest.
352-
writeLatest(t, collectionLoc, expectedSuffix)
353-
}
354-
355-
// A remote incremental into the third full backup: BACKUP INTO LATEST
356-
// IN collection, BUT with a trick. Write a (fake) incremental backup
357-
// to the old directory, to be sure that subsequent incremental backups
358-
// go there as well (and not the newer incrementals/ subdir.)
359-
{
360-
expectedSuffix := "/2020/12/25-103000.00"
361-
expectedIncDir := "/20201225/110000.00-20201225-103000.00"
362-
363-
// Writes the (fake) incremental backup.
364-
oldStyleDefault := fmt.Sprintf("nodelocal://1/%s%s%s?AUTH=implicit",
365-
t.Name(),
366-
expectedSuffix, expectedIncDir)
367-
writeManifest(t, oldStyleDefault, inc8Time)
368-
369-
expectedSuffix = "/2020/12/25-103000.00"
370-
expectedIncDir = "/20201225/113000.00-20201225-110000.00"
371-
expectedSubdir := expectedSuffix
372-
expectedDefault := fmt.Sprintf("nodelocal://1/%s%s%s?AUTH=implicit",
373-
t.Name(),
374-
expectedSuffix, expectedIncDir)
375-
376-
testCollectionBackup(t, inc9Time,
377-
expectedDefault, expectedSuffix, expectedIncDir, []string{backup3Location, oldStyleDefault},
378-
true /* intoLatest */, expectedSubdir, noIncrementalStorage)
379-
writeManifest(t, expectedDefault, inc9Time)
380-
}
381-
382-
// A new full backup: BACKUP INTO collection
383-
{
384-
expectedSuffix := "/2020/12/25-120000.00"
385-
expectedIncDir := ""
386-
expectedDefault := fmt.Sprintf("nodelocal://1/%s%s?AUTH=implicit", t.Name(), expectedSuffix)
387-
388-
testCollectionBackup(t, full4Time,
389-
expectedDefault, expectedSuffix, expectedIncDir, []string(nil),
390-
false /* intoLatest */, noExplicitSubDir, noIncrementalStorage)
391-
writeManifest(t, expectedDefault, full4Time)
392-
// We also wrote a new full backup, so let's update the latest.
393-
writeLatest(t, collectionLoc, expectedSuffix)
394-
}
395335
})
396336
})
397337
}

pkg/backup/backupdest/incrementals.go

Lines changed: 1 addition & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -196,45 +196,16 @@ func ResolveIncrementalsBackupLocation(
196196
return incPaths, nil
197197
}
198198

199-
resolvedIncrementalsBackupLocationOld, err := backuputils.AppendPaths(fullBackupCollections, subdir)
200-
if err != nil {
201-
return nil, err
202-
}
203-
204-
// We can have >1 full backup collection specified, but each will have an
205-
// incremental layer iff all of them do. So it suffices to check only the
206-
// first.
207-
// Check we can read from this location, though we don't need the backups here.
208-
prevOld, err := backupsFromLocation(ctx, user, execCfg, resolvedIncrementalsBackupLocationOld[0])
209-
if err != nil {
210-
return nil, err
211-
}
212-
213199
resolvedIncrementalsBackupLocation, err := backuputils.AppendPaths(fullBackupCollections, backupbase.DefaultIncrementalsSubdir, subdir)
214200
if err != nil {
215201
return nil, err
216202
}
217203

218-
prev, err := backupsFromLocation(ctx, user, execCfg, resolvedIncrementalsBackupLocation[0])
204+
_, err = backupsFromLocation(ctx, user, execCfg, resolvedIncrementalsBackupLocation[0])
219205
if err != nil {
220206
return nil, err
221207
}
222208

223-
// TODO(bardin): This algorithm divides "destination resolution" and "actual backup lookup" for historical reasons,
224-
// but this doesn't quite make sense now that destination resolution depends on backup lookup.
225-
// Try to figure out a clearer way to organize this.
226-
if len(prevOld) > 0 && len(prev) > 0 {
227-
return nil, errors.New(
228-
"Incremental layers found in both old and new default locations. " +
229-
"Please choose a location manually with the `incremental_location` parameter.")
230-
}
231-
232-
// If we have backups in the old default location, continue to use the old location.
233-
if len(prevOld) > 0 {
234-
return resolvedIncrementalsBackupLocationOld, nil
235-
}
236-
237-
// Otherwise, use the new location.
238209
return resolvedIncrementalsBackupLocation, nil
239210
}
240211

pkg/bench/rttanalysis/testdata/benchmark_expectations

Lines changed: 24 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
exp,benchmark
2-
14,AlterRole/alter_role_with_1_option
3-
17,AlterRole/alter_role_with_2_options
4-
23,AlterRole/alter_role_with_3_options
2+
12,AlterRole/alter_role_with_1_option
3+
15,AlterRole/alter_role_with_2_options
4+
19,AlterRole/alter_role_with_3_options
55
13,AlterTableAddCheckConstraint/alter_table_add_1_check_constraint
66
13,AlterTableAddCheckConstraint/alter_table_add_2_check_constraints
77
13,AlterTableAddCheckConstraint/alter_table_add_3_check_constraints
@@ -15,9 +15,9 @@ exp,benchmark
1515
14,AlterTableConfigureZone/alter_table_configure_zone_5_replicas
1616
14,AlterTableConfigureZone/alter_table_configure_zone_7_replicas_
1717
14,AlterTableConfigureZone/alter_table_configure_zone_ranges
18-
21,AlterTableDropColumn/alter_table_drop_1_column
19-
21,AlterTableDropColumn/alter_table_drop_2_columns
20-
21,AlterTableDropColumn/alter_table_drop_3_columns
18+
22,AlterTableDropColumn/alter_table_drop_1_column
19+
22,AlterTableDropColumn/alter_table_drop_2_columns
20+
22,AlterTableDropColumn/alter_table_drop_3_columns
2121
13,AlterTableDropConstraint/alter_table_drop_1_check_constraint
2222
13,AlterTableDropConstraint/alter_table_drop_2_check_constraints
2323
13,AlterTableDropConstraint/alter_table_drop_3_check_constraints
@@ -28,20 +28,20 @@ exp,benchmark
2828
11,AlterTableUnsplit/alter_table_unsplit_at_2_values
2929
14,AlterTableUnsplit/alter_table_unsplit_at_3_values
3030
2-4,Audit/select_from_an_audit_table
31-
20,CreateRole/create_role_with_1_option
32-
23,CreateRole/create_role_with_2_options
33-
26,CreateRole/create_role_with_3_options
34-
21,CreateRole/create_role_with_no_options
31+
13,CreateRole/create_role_with_1_option
32+
16,CreateRole/create_role_with_2_options
33+
19,CreateRole/create_role_with_3_options
34+
14,CreateRole/create_role_with_no_options
3535
14,"Discard/DISCARD_ALL,_1_tables_in_1_db"
3636
19,"Discard/DISCARD_ALL,_2_tables_in_2_dbs"
3737
0,"Discard/DISCARD_ALL,_no_tables"
3838
15,DropDatabase/drop_database_0_tables
3939
16,DropDatabase/drop_database_1_table
4040
16,DropDatabase/drop_database_2_tables
4141
16,DropDatabase/drop_database_3_tables
42-
29-30,DropRole/drop_1_role
43-
37,DropRole/drop_2_roles
44-
45-49,DropRole/drop_3_roles
42+
22,DropRole/drop_1_role
43+
30,DropRole/drop_2_roles
44+
39,DropRole/drop_3_roles
4545
13,DropSequence/drop_1_sequence
4646
15,DropSequence/drop_2_sequences
4747
17,DropSequence/drop_3_sequences
@@ -59,8 +59,8 @@ exp,benchmark
5959
8,Grant/grant_all_on_1_table
6060
8,Grant/grant_all_on_2_tables
6161
8,Grant/grant_all_on_3_tables
62-
15,GrantRole/grant_1_role
63-
19,GrantRole/grant_2_roles
62+
12,GrantRole/grant_1_role
63+
16,GrantRole/grant_2_roles
6464
12-15,Jobs/cancel_job
6565
3,Jobs/crdb_internal.system_jobs
6666
3-5,Jobs/jobs_page_default
@@ -79,19 +79,19 @@ exp,benchmark
7979
4,ORMQueries/django_column_introspection_1_table
8080
4,ORMQueries/django_column_introspection_4_tables
8181
4,ORMQueries/django_column_introspection_8_tables
82-
6,ORMQueries/django_comment_introspection_with_comments
83-
6,ORMQueries/django_table_introspection_1_table
84-
6,ORMQueries/django_table_introspection_8_tables
82+
5,ORMQueries/django_comment_introspection_with_comments
83+
5,ORMQueries/django_table_introspection_1_table
84+
5,ORMQueries/django_table_introspection_8_tables
8585
0,ORMQueries/has_column_privilege_using_attnum
8686
0,ORMQueries/has_column_privilege_using_column_name
8787
0,ORMQueries/has_schema_privilege
8888
0,ORMQueries/has_sequence_privilege
8989
0,ORMQueries/has_table_privilege
90-
5,ORMQueries/hasura_column_descriptions
90+
6,ORMQueries/hasura_column_descriptions
9191
20,ORMQueries/hasura_column_descriptions_8_tables
9292
5,ORMQueries/hasura_column_descriptions_modified
9393
4,ORMQueries/information_schema._pg_index_position
94-
4,ORMQueries/introspection_description_join
94+
5,ORMQueries/introspection_description_join
9595
4,ORMQueries/liquibase_migrations
9696
8,ORMQueries/liquibase_migrations_on_multiple_dbs
9797
5,ORMQueries/npgsql_fields
@@ -105,14 +105,14 @@ exp,benchmark
105105
3,ORMQueries/pg_namespace
106106
4,ORMQueries/pg_type
107107
6,ORMQueries/prisma_column_descriptions
108-
3,ORMQueries/prisma_column_descriptions_updated
108+
4,ORMQueries/prisma_column_descriptions_updated
109109
5,ORMQueries/prisma_types_16
110110
5,ORMQueries/prisma_types_4
111111
8,Revoke/revoke_all_on_1_table
112112
8,Revoke/revoke_all_on_2_tables
113113
8,Revoke/revoke_all_on_3_tables
114-
13,RevokeRole/revoke_1_role
115-
15,RevokeRole/revoke_2_roles
114+
10,RevokeRole/revoke_1_role
115+
12,RevokeRole/revoke_2_roles
116116
4,ShowGrants/grant_2_roles
117117
4,ShowGrants/grant_3_roles
118118
4,ShowGrants/grant_4_roles
@@ -129,7 +129,7 @@ exp,benchmark
129129
0,UDFResolution/select_from_udf
130130
2,UseManyRoles/use_2_roles
131131
2,UseManyRoles/use_50_roles
132-
1,VirtualTableQueries/select_crdb_internal.invalid_objects_with_1_fk
132+
2,VirtualTableQueries/select_crdb_internal.invalid_objects_with_1_fk
133133
1,VirtualTableQueries/select_crdb_internal.tables_with_1_fk
134134
0,VirtualTableQueries/show_create_all_routines
135135
0,VirtualTableQueries/show_create_all_triggers

pkg/cli/testdata/doctor/test_examine_cluster

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,5 +3,5 @@ debug doctor examine cluster
33
debug doctor examine cluster
44
Examining 69 descriptors and 68 namespace entries...
55
ParentID 100, ParentSchemaID 101: relation "foo" (105): expected matching namespace entry, found none
6-
Examining 13 jobs...
6+
Examining 12 jobs...
77
ERROR: validation failed

pkg/cli/testdata/doctor/test_examine_cluster_dropped

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,5 +2,5 @@ debug doctor examine cluster
22
----
33
debug doctor examine cluster
44
Examining 68 descriptors and 68 namespace entries...
5-
Examining 11 jobs...
5+
Examining 10 jobs...
66
No problems found!

0 commit comments

Comments
 (0)