Skip to content

Commit 05613b4

Browse files
committed
add support for `\` and `/` special characters in table name and database name, fix #1091
Signed-off-by: Slach <bloodjazman@gmail.com>
1 parent 092ba64 commit 05613b4

File tree

9 files changed

+68
-52
lines changed

9 files changed

+68
-52
lines changed

ChangeLog.md

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,8 @@ IMPROVEMENTS
99
during upload and download, fix [854](https://github.com/Altinity/clickhouse-backup/issues/854)
1010
- add `--configs-only` and `--rbac-only` options to `upload` and `download` commands,
1111
fix [1042](https://github.com/Altinity/clickhouse-backup/issues/1042)
12+
- add support for `\` and `/` special characters in table names and database names,
13+
fix [1091](https://github.com/Altinity/clickhouse-backup/issues/1091)
1214

1315
BUG FIXES
1416

cmd/clickhouse-backup/main.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -395,7 +395,7 @@ func main() {
395395
UsageText: "clickhouse-backup restore [-t, --tables=<db>.<table>] [-m, --restore-database-mapping=<originDB>:<targetDB>[,<...>]] [--tm, --restore-table-mapping=<originTable>:<targetTable>[,<...>]] [--partitions=<partitions_names>] [-s, --schema] [-d, --data] [--rm, --drop] [-i, --ignore-dependencies] [--rbac] [--configs] [--resume] <backup_name>",
396396
Action: func(c *cli.Context) error {
397397
b := backup.NewBackuper(config.GetConfigFromCli(c))
398-
return b.Restore(c.Args().First(), c.String("t"), c.StringSlice("restore-database-mapping"), c.StringSlice("restore-table-mapping"), c.StringSlice("partitions"), c.StringSlice("skip-projections"), c.Bool("schema"), c.Bool("data"), c.Bool("drop"), c.Bool("ignore-dependencies"), c.Bool("rbac"), c.Bool("rbac-only"), c.Bool("configs"), c.Bool("configs-only"), c.Bool("resume"), version, c.Int("command-id"))
398+
return b.Restore(c.Args().First(), c.String("tables"), c.StringSlice("restore-database-mapping"), c.StringSlice("restore-table-mapping"), c.StringSlice("partitions"), c.StringSlice("skip-projections"), c.Bool("schema"), c.Bool("data"), c.Bool("drop"), c.Bool("ignore-dependencies"), c.Bool("rbac"), c.Bool("rbac-only"), c.Bool("configs"), c.Bool("configs-only"), c.Bool("resume"), version, c.Int("command-id"))
399399
},
400400
Flags: append(cliapp.Flags,
401401
cli.StringFlag{

pkg/backup/list.go

Lines changed: 7 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -502,8 +502,13 @@ func (b *Backuper) GetTablesRemote(ctx context.Context, backupName string, table
502502
tableName := fmt.Sprintf("%s.%s", t.Database, t.Table)
503503
shallSkipped := b.shouldSkipByTableName(tableName)
504504
matched := false
505-
for _, p := range tablePatterns {
506-
if matched, _ = filepath.Match(strings.Trim(p, " \t\r\n"), tableName); matched {
505+
for _, pattern := range tablePatterns {
506+
// https://github.com/Altinity/clickhouse-backup/issues/1091
507+
if pattern == "*" {
508+
matched = true
509+
break
510+
}
511+
if matched, _ = filepath.Match(strings.Trim(pattern, " \t\r\n"), tableName); matched {
507512
break
508513
}
509514
}

pkg/backup/restore.go

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -209,7 +209,8 @@ func (b *Backuper) Restore(backupName, tablePattern string, databaseMapping, tab
209209
var tablesForRestore ListOfTables
210210
var partitionsNames map[metadata.TableTitle][]string
211211
if tablePattern == "" {
212-
tablePattern = "*"
212+
// https://github.com/Altinity/clickhouse-backup/issues/1091
213+
tablePattern = "*,*/*"
213214
}
214215
metadataPath := path.Join(b.DefaultDataPath, "backup", backupName, "metadata")
215216
if b.isEmbedded && b.cfg.ClickHouse.EmbeddedBackupDisk != "" {

pkg/backup/table_pattern.go

Lines changed: 13 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -79,11 +79,12 @@ func (b *Backuper) getTableListByPatternLocal(ctx context.Context, metadataPath
7979
p = strings.TrimSuffix(p, ".json")
8080
}
8181
names, database, table, tableName, shallSkipped, continueProcessing := b.checkShallSkipped(p, metadataPath)
82-
if !continueProcessing {
82+
if !continueProcessing || shallSkipped {
8383
return nil
8484
}
85-
for _, p := range tablePatterns {
86-
if matched, _ := filepath.Match(strings.Trim(p, " \t\r\n"), tableName); !matched || shallSkipped {
85+
for _, pattern := range tablePatterns {
86+
// https://github.com/Altinity/clickhouse-backup/issues/1091
87+
if matched, _ := filepath.Match(strings.Trim(pattern, " \t\r\n"), strings.Replace(tableName, "/", "_", -1)); !matched {
8788
continue
8889
}
8990
data, err := os.ReadFile(filePath)
@@ -240,11 +241,12 @@ func (b *Backuper) enrichTablePatternsByInnerDependencies(metadataPath string, t
240241
return nil
241242
}
242243
names, database, table, tableName, shallSkipped, continueProcessing := b.checkShallSkipped(strings.TrimSuffix(filepath.ToSlash(filePath), ".json"), metadataPath)
243-
if !continueProcessing {
244+
if !continueProcessing || shallSkipped {
244245
return nil
245246
}
246-
for _, p := range tablePatterns {
247-
if matched, _ := filepath.Match(strings.Trim(p, " \t\r\n"), tableName); !matched || shallSkipped {
247+
for _, pattern := range tablePatterns {
248+
// https://github.com/Altinity/clickhouse-backup/issues/1091
249+
if matched, _ := filepath.Match(strings.Trim(pattern, " \t\r\n"), strings.Replace(tableName, "/", "_", -1)); !matched {
248250
continue
249251
}
250252
data, err := os.ReadFile(filePath)
@@ -500,12 +502,13 @@ func getTableListByPatternRemote(ctx context.Context, b *Backuper, remoteBackupM
500502
continue
501503
}
502504
tablePatterns:
503-
for _, p := range tablePatterns {
505+
for _, pattern := range tablePatterns {
504506
select {
505507
case <-ctx.Done():
506508
return nil, ctx.Err()
507509
default:
508-
if matched, _ := filepath.Match(strings.Trim(p, " \t\r\n"), tableName); !matched {
510+
// https://github.com/Altinity/clickhouse-backup/issues/1091
511+
if matched, _ := filepath.Match(strings.Trim(pattern, " \t\r\n"), strings.Replace(tableName, "/", "_", -1)); !matched {
509512
continue
510513
}
511514
tmReader, err := b.dst.GetFileReader(ctx, path.Join(metadataPath, common.TablePathEncode(t.Database), fmt.Sprintf("%s.json", common.TablePathEncode(t.Table))))
@@ -580,7 +583,8 @@ func parseTablePatternForDownload(tables []metadata.TableTitle, tablePattern str
580583
for _, t := range tables {
581584
for _, pattern := range tablePatterns {
582585
tableName := fmt.Sprintf("%s.%s", t.Database, t.Table)
583-
if matched, _ := filepath.Match(strings.Trim(pattern, " \t\r\n"), tableName); matched {
586+
// https://github.com/Altinity/clickhouse-backup/issues/1091
587+
if matched, _ := filepath.Match(strings.Trim(pattern, " \t\r\n"), strings.Replace(tableName, "/", "_", -1)); matched {
584588
result = append(result, t)
585589
break
586590
}

pkg/clickhouse/clickhouse.go

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -374,6 +374,9 @@ func (ch *ClickHouse) GetTables(ctx context.Context, tablePattern string) ([]Tab
374374
if err = ch.SelectContext(ctx, &tables, allTablesSQL); err != nil {
375375
return nil, err
376376
}
377+
for i := range tables {
378+
tables[i].CreateTableQuery = strings.ReplaceAll(tables[i].CreateTableQuery, `\\`, `\`)
379+
}
377380
metadataPath, err := ch.getMetadataPath(ctx)
378381
if err != nil {
379382
return nil, err

pkg/filesystemhelper/filesystemhelper.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -317,10 +317,10 @@ func MoveShadowToBackup(shadowPath, backupPartsPath string, partitionsBackupMap
317317
}
318318

319319
func IsSkipProjections(skipProjections []string, relativePath string) bool {
320-
log.Debug().Msgf("try IsSkipProjections, skipProjections=%v, relativePath=%s", skipProjections, relativePath)
321320
if skipProjections == nil || len(skipProjections) == 0 {
322321
return false
323322
}
323+
log.Debug().Msgf("try IsSkipProjections, skipProjections=%v, relativePath=%s", skipProjections, relativePath)
324324

325325
matchPattenFinal := func(dbPattern string, tablePattern string, projectionPattern string, relativePath string) bool {
326326
finalPattern := path.Join(dbPattern, tablePattern, "*", projectionPattern+".proj", "*")

pkg/partition/partition.go

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -249,7 +249,8 @@ func ConvertPartitionsToIdsMapAndNamesList(ctx context.Context, ch *clickhouse.C
249249
}
250250

251251
func addItemToIdMapAndNameListIfNotExists(partitionId, partitionName, database, table string, partitionsIdMap map[metadata.TableTitle]common.EmptyMap, partitionsNameList map[metadata.TableTitle][]string, tablePattern string) {
252-
if matched, err := filepath.Match(tablePattern, database+"."+table); err == nil && matched {
252+
// https://github.com/Altinity/clickhouse-backup/issues/1091
253+
if matched, err := filepath.Match(tablePattern, database+"."+table); err == nil && matched || tablePattern == "*" {
253254
if partitionId != "" {
254255
partitionsIdMap[metadata.TableTitle{
255256
Database: database, Table: table,

test/integration/integration_test.go

Lines changed: 37 additions & 37 deletions
Original file line numberDiff line numberDiff line change
@@ -79,12 +79,12 @@ func init() {
7979
dockerPool.Config.MaxTotal = runParallelInt
8080
}
8181

82-
const dbNameAtomic = "_test#$.ДБ_atomic_"
83-
const dbNameOrdinary = "_test#$.ДБ_ordinary_"
82+
const dbNameAtomic = "_test#$.ДБ_atomic_/issue\\_1091"
83+
const dbNameOrdinary = "_test#$.ДБ_ordinary_/issue\\_1091"
8484
const dbNameMySQL = "mysql_db"
8585
const dbNamePostgreSQL = "pgsql_db"
86-
const Issue331Atomic = "_issue331._atomic_"
87-
const Issue331Ordinary = "_issue331.ordinary_"
86+
const Issue331Issue1091Atomic = "_issue331._atomic_/issue\\_1091"
87+
const Issue331Issue1091Ordinary = "_issue331.ordinary_/issue\\_1091"
8888

8989
type TestDataStruct struct {
9090
Database string
@@ -152,28 +152,28 @@ var defaultTestData = []TestDataStruct{
152152
Fields: []string{"TimeStamp", "Item"},
153153
OrderBy: "TimeStamp",
154154
}, {
155-
Database: Issue331Atomic, DatabaseEngine: "Atomic",
156-
Name: Issue331Atomic, // need cover fix https://github.com/Altinity/clickhouse-backup/issues/331
157-
Schema: fmt.Sprintf("(`%s` UInt64, Col1 String, Col2 String, Col3 String, Col4 String, Col5 String) ENGINE = MergeTree PARTITION BY `%s` ORDER BY (`%s`, Col1, Col2, Col3, Col4, Col5) SETTINGS index_granularity = 8192", Issue331Atomic, Issue331Atomic, Issue331Atomic),
155+
Database: Issue331Issue1091Atomic, DatabaseEngine: "Atomic",
156+
Name: Issue331Issue1091Atomic, // need cover fix https://github.com/Altinity/clickhouse-backup/issues/331
157+
Schema: fmt.Sprintf("(`%s` UInt64, Col1 String, Col2 String, Col3 String, Col4 String, Col5 String) ENGINE = MergeTree PARTITION BY `%s` ORDER BY (`%s`, Col1, Col2, Col3, Col4, Col5) SETTINGS index_granularity = 8192", Issue331Issue1091Atomic, Issue331Issue1091Atomic, Issue331Issue1091Atomic),
158158
Rows: func() []map[string]interface{} {
159159
var result []map[string]interface{}
160160
for i := 0; i < 100; i++ {
161-
result = append(result, map[string]interface{}{Issue331Atomic: uint64(i), "Col1": "Text1", "Col2": "Text2", "Col3": "Text3", "Col4": "Text4", "Col5": "Text5"})
161+
result = append(result, map[string]interface{}{Issue331Issue1091Atomic: uint64(i), "Col1": "Text1", "Col2": "Text2", "Col3": "Text3", "Col4": "Text4", "Col5": "Text5"})
162162
}
163163
return result
164164
}(),
165-
Fields: []string{Issue331Atomic, "Col1", "Col2", "Col3", "Col4", "Col5"},
166-
OrderBy: Issue331Atomic + "_{test}",
165+
Fields: []string{Issue331Issue1091Atomic, "Col1", "Col2", "Col3", "Col4", "Col5"},
166+
OrderBy: Issue331Issue1091Atomic + "_{test}",
167167
}, {
168-
Database: Issue331Ordinary, DatabaseEngine: "Ordinary",
169-
Name: Issue331Ordinary, // need cover fix https://github.com/Altinity/clickhouse-backup/issues/331
170-
Schema: fmt.Sprintf("(`%s` String, order_time DateTime, amount Float64) ENGINE = MergeTree() PARTITION BY toYYYYMM(order_time) ORDER BY (order_time, `%s`)", Issue331Ordinary, Issue331Ordinary),
168+
Database: Issue331Issue1091Ordinary, DatabaseEngine: "Ordinary",
169+
Name: Issue331Issue1091Ordinary, // need cover fix https://github.com/Altinity/clickhouse-backup/issues/331
170+
Schema: fmt.Sprintf("(`%s` String, order_time DateTime, amount Float64) ENGINE = MergeTree() PARTITION BY toYYYYMM(order_time) ORDER BY (order_time, `%s`)", Issue331Issue1091Ordinary, Issue331Issue1091Ordinary),
171171
Rows: []map[string]interface{}{
172-
{Issue331Ordinary: "1", "order_time": toTS("2010-01-01 00:00:00"), "amount": 1.0},
173-
{Issue331Ordinary: "2", "order_time": toTS("2010-02-01 00:00:00"), "amount": 2.0},
172+
{Issue331Issue1091Ordinary: "1", "order_time": toTS("2010-01-01 00:00:00"), "amount": 1.0},
173+
{Issue331Issue1091Ordinary: "2", "order_time": toTS("2010-02-01 00:00:00"), "amount": 2.0},
174174
},
175-
Fields: []string{Issue331Ordinary, "order_time", "amount"},
176-
OrderBy: Issue331Ordinary + "_{test}",
175+
Fields: []string{Issue331Issue1091Ordinary, "order_time", "amount"},
176+
OrderBy: Issue331Issue1091Ordinary + "_{test}",
177177
}, {
178178
Database: dbNameOrdinary, DatabaseEngine: "Ordinary",
179179
Name: "yuzhichang_table3",
@@ -320,17 +320,17 @@ var defaultTestData = []TestDataStruct{
320320
" (`%s` UInt64, Col1 String, Col2 String, Col3 String, Col4 String, Col5 String) PRIMARY KEY `%s` "+
321321
" SOURCE(CLICKHOUSE(host 'localhost' port 9000 db '%s' table '%s' user 'default' password ''))"+
322322
" LAYOUT(HASHED()) LIFETIME(60)",
323-
Issue331Atomic, Issue331Atomic, Issue331Atomic, Issue331Atomic), // same table and name need cover fix https://github.com/Altinity/clickhouse-backup/issues/331
323+
Issue331Issue1091Atomic, Issue331Issue1091Atomic, Issue331Issue1091Atomic, Issue331Issue1091Atomic), // same table and name need cover fix https://github.com/Altinity/clickhouse-backup/issues/331
324324
SkipInsert: true,
325325
Rows: func() []map[string]interface{} {
326326
var result []map[string]interface{}
327327
for i := 0; i < 100; i++ {
328-
result = append(result, map[string]interface{}{Issue331Atomic: uint64(i), "Col1": "Text1", "Col2": "Text2", "Col3": "Text3", "Col4": "Text4", "Col5": "Text5"})
328+
result = append(result, map[string]interface{}{Issue331Issue1091Atomic: uint64(i), "Col1": "Text1", "Col2": "Text2", "Col3": "Text3", "Col4": "Text4", "Col5": "Text5"})
329329
}
330330
return result
331331
}(),
332332
Fields: []string{},
333-
OrderBy: Issue331Atomic + "_{test}",
333+
OrderBy: Issue331Issue1091Atomic + "_{test}",
334334
},
335335
{
336336
Database: dbNameMySQL, DatabaseEngine: "MySQL('mysql:3306','mysql','root','root')",
@@ -389,28 +389,28 @@ var defaultIncrementData = []TestDataStruct{
389389
Fields: []string{"TimeStamp", "Item"},
390390
OrderBy: "TimeStamp",
391391
}, {
392-
Database: Issue331Atomic, DatabaseEngine: "Atomic",
393-
Name: Issue331Atomic, // need cover fix https://github.com/Altinity/clickhouse-backup/issues/331
394-
Schema: fmt.Sprintf("(`%s` UInt64, Col1 String, Col2 String, Col3 String, Col4 String, Col5 String) ENGINE = MergeTree PARTITION BY `%s` ORDER BY (`%s`, Col1, Col2, Col3, Col4, Col5) SETTINGS index_granularity = 8192", Issue331Atomic, Issue331Atomic, Issue331Atomic),
392+
Database: Issue331Issue1091Atomic, DatabaseEngine: "Atomic",
393+
Name: Issue331Issue1091Atomic, // need cover fix https://github.com/Altinity/clickhouse-backup/issues/331
394+
Schema: fmt.Sprintf("(`%s` UInt64, Col1 String, Col2 String, Col3 String, Col4 String, Col5 String) ENGINE = MergeTree PARTITION BY `%s` ORDER BY (`%s`, Col1, Col2, Col3, Col4, Col5) SETTINGS index_granularity = 8192", Issue331Issue1091Atomic, Issue331Issue1091Atomic, Issue331Issue1091Atomic),
395395
Rows: func() []map[string]interface{} {
396396
var result []map[string]interface{}
397397
for i := 200; i < 220; i++ {
398-
result = append(result, map[string]interface{}{Issue331Atomic: uint64(i), "Col1": "Text1", "Col2": "Text2", "Col3": "Text3", "Col4": "Text4", "Col5": "Text5"})
398+
result = append(result, map[string]interface{}{Issue331Issue1091Atomic: uint64(i), "Col1": "Text1", "Col2": "Text2", "Col3": "Text3", "Col4": "Text4", "Col5": "Text5"})
399399
}
400400
return result
401401
}(),
402-
Fields: []string{Issue331Atomic, "Col1", "Col2", "Col3", "Col4", "Col5"},
403-
OrderBy: Issue331Atomic + "_{test}",
402+
Fields: []string{Issue331Issue1091Atomic, "Col1", "Col2", "Col3", "Col4", "Col5"},
403+
OrderBy: Issue331Issue1091Atomic + "_{test}",
404404
}, {
405-
Database: Issue331Ordinary, DatabaseEngine: "Ordinary",
406-
Name: Issue331Ordinary, // need cover fix https://github.com/Altinity/clickhouse-backup/issues/331
407-
Schema: fmt.Sprintf("(`%s` String, order_time DateTime, amount Float64) ENGINE = MergeTree() PARTITION BY toYYYYMM(order_time) ORDER BY (order_time, `%s`)", Issue331Ordinary, Issue331Ordinary),
405+
Database: Issue331Issue1091Ordinary, DatabaseEngine: "Ordinary",
406+
Name: Issue331Issue1091Ordinary, // need cover fix https://github.com/Altinity/clickhouse-backup/issues/331
407+
Schema: fmt.Sprintf("(`%s` String, order_time DateTime, amount Float64) ENGINE = MergeTree() PARTITION BY toYYYYMM(order_time) ORDER BY (order_time, `%s`)", Issue331Issue1091Ordinary, Issue331Issue1091Ordinary),
408408
Rows: []map[string]interface{}{
409-
{Issue331Ordinary: "3", "order_time": toTS("2010-03-01 00:00:00"), "amount": 3.0},
410-
{Issue331Ordinary: "4", "order_time": toTS("2010-04-01 00:00:00"), "amount": 4.0},
409+
{Issue331Issue1091Ordinary: "3", "order_time": toTS("2010-03-01 00:00:00"), "amount": 3.0},
410+
{Issue331Issue1091Ordinary: "4", "order_time": toTS("2010-04-01 00:00:00"), "amount": 4.0},
411411
},
412-
Fields: []string{Issue331Ordinary, "order_time", "amount"},
413-
OrderBy: Issue331Ordinary + "_{test}",
412+
Fields: []string{Issue331Issue1091Ordinary, "order_time", "amount"},
413+
OrderBy: Issue331Issue1091Ordinary + "_{test}",
414414
}, {
415415
Database: dbNameOrdinary, DatabaseEngine: "Ordinary",
416416
Name: "yuzhichang_table3",
@@ -873,7 +873,7 @@ func TestS3NoDeletePermission(t *testing.T) {
873873
env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "restore_remote", "no_delete_backup")
874874
env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "delete", "local", "no_delete_backup")
875875
r.Error(env.DockerExec("clickhouse-backup", "clickhouse-backup", "delete", "remote", "no_delete_backup"))
876-
databaseList := []string{dbNameOrdinary, dbNameAtomic, dbNameMySQL, dbNamePostgreSQL, Issue331Atomic, Issue331Ordinary}
876+
databaseList := []string{dbNameOrdinary, dbNameAtomic, dbNameMySQL, dbNamePostgreSQL, Issue331Issue1091Atomic, Issue331Issue1091Ordinary}
877877
dropDatabasesFromTestDataDataSet(t, r, env, databaseList)
878878
r.NoError(env.DockerCP("config-s3.yml", "clickhouse-backup:/etc/clickhouse-backup/config.yml"))
879879
env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "delete", "remote", "no_delete_backup")
@@ -2038,7 +2038,7 @@ func TestKeepBackupRemoteAndDiffFromRemote(t *testing.T) {
20382038
for i := 0; i < 5; i++ {
20392039
backupNames[i] = fmt.Sprintf("keep_remote_backup_%d", i)
20402040
}
2041-
databaseList := []string{dbNameOrdinary, dbNameAtomic, dbNameMySQL, dbNamePostgreSQL, Issue331Atomic, Issue331Ordinary}
2041+
databaseList := []string{dbNameOrdinary, dbNameAtomic, dbNameMySQL, dbNamePostgreSQL, Issue331Issue1091Atomic, Issue331Issue1091Ordinary}
20422042
fullCleanup(t, r, env, backupNames, []string{"remote", "local"}, databaseList, false, false, "config-s3.yml")
20432043
incrementData := defaultIncrementData
20442044
generateTestData(t, r, env, "S3", false, defaultTestData)
@@ -2073,7 +2073,7 @@ func TestKeepBackupRemoteAndDiffFromRemote(t *testing.T) {
20732073
}
20742074
env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "-c", "/etc/clickhouse-backup/config-s3.yml", "restore", "--rm", latestIncrementBackup)
20752075
var res uint64
2076-
r.NoError(env.ch.SelectSingleRowNoCtx(&res, fmt.Sprintf("SELECT count() FROM `%s_%s`.`%s_%s`", Issue331Atomic, t.Name(), Issue331Atomic, t.Name())))
2076+
r.NoError(env.ch.SelectSingleRowNoCtx(&res, fmt.Sprintf("SELECT count() FROM `%s_%s`.`%s_%s`", Issue331Issue1091Atomic, t.Name(), Issue331Issue1091Atomic, t.Name())))
20772077
r.Equal(uint64(100+20*4), res)
20782078
fullCleanup(t, r, env, []string{latestIncrementBackup}, []string{"local"}, nil, true, true, "config-s3.yml")
20792079
fullCleanup(t, r, env, backupNames, []string{"remote"}, databaseList, true, true, "config-s3.yml")
@@ -2617,7 +2617,7 @@ func (env *TestEnvironment) runMainIntegrationScenario(t *testing.T, remoteStora
26172617
fullBackupName := fmt.Sprintf("%s_full_%d", t.Name(), rand.Int())
26182618
incrementBackupName := fmt.Sprintf("%s_increment_%d", t.Name(), rand.Int())
26192619
incrementBackupName2 := fmt.Sprintf("%s_increment2_%d", t.Name(), rand.Int())
2620-
databaseList := []string{dbNameOrdinary, dbNameAtomic, dbNameMySQL, dbNamePostgreSQL, Issue331Atomic, Issue331Ordinary}
2620+
databaseList := []string{dbNameOrdinary, dbNameAtomic, dbNameMySQL, dbNamePostgreSQL, Issue331Issue1091Atomic, Issue331Issue1091Ordinary}
26212621
tablesPattern := fmt.Sprintf("*_%s.*", t.Name())
26222622
log.Debug().Msg("Clean before start")
26232623
fullCleanup(t, r, env, []string{fullBackupName, incrementBackupName}, []string{"remote", "local"}, databaseList, false, false, backupConfig)

0 commit comments

Comments
 (0)