
Commit 9d07576

remove Coveralls reporting until coverallsapp/coverage-reporter#180 is resolved
Signed-off-by: Slach <[email protected]>
1 parent d592187 commit 9d07576

3 files changed: +77 additions, -61 deletions

.github/workflows/build.yaml (38 additions, 35 deletions)
@@ -55,13 +55,14 @@ jobs:
           make build/linux/amd64/clickhouse-backup-fips build/linux/arm64/clickhouse-backup-fips
           make build-race build-race-fips config test

-      - name: Report unittest coverage
-        uses: coverallsapp/github-action@v2
-        with:
-          file: _coverage_/coverage.out
-          parallel: true
-          format: golang
-          flag-name: unittest-${{ matrix.clickhouse }}
+      # todo return when resolve https://github.com/coverallsapp/coverage-reporter/issues/180
+      # - name: Report unittest coverage
+      #   uses: coverallsapp/github-action@v2
+      #   with:
+      #     file: _coverage_/coverage.out
+      #     parallel: true
+      #     format: golang
+      #     flag-name: unittest-${{ matrix.clickhouse }}

       - name: Extract GCS credentials
         id: secrets
@@ -176,14 +177,15 @@ jobs:
           ls -la test/testflows/_coverage_
           go env
           go tool covdata textfmt -i test/testflows/_coverage_/ -o test/testflows/_coverage_/coverage.out
-      - name: Report testflows coverage
-        uses: coverallsapp/github-action@v2
-        with:
-          base-path: ./
-          file: test/testflows/_coverage_/coverage.out
-          parallel: true
-          format: golang
-          flag-name: testflows-${{ matrix.clickhouse }}
+      # todo return when resolve https://github.com/coverallsapp/coverage-reporter/issues/180
+      # - name: Report testflows coverage
+      #   uses: coverallsapp/github-action@v2
+      #   with:
+      #     base-path: ./
+      #     file: test/testflows/_coverage_/coverage.out
+      #     parallel: true
+      #     format: golang
+      #     flag-name: testflows-${{ matrix.clickhouse }}
       # todo possible failures https://github.com/actions/upload-artifact/issues/270
       - name: Upload testflows logs
         uses: actions/upload-artifact@v4
@@ -343,26 +345,27 @@ jobs:
           sudo chmod -Rv a+rw test/integration/_coverage_/
           ls -la test/integration/_coverage_
           go tool covdata textfmt -i test/integration/_coverage_/ -o test/integration/_coverage_/coverage.out
-      - name: Report integration coverage
-        uses: coverallsapp/github-action@v2
-        with:
-          base-path: ./
-          file: test/integration/_coverage_/coverage.out
-          parallel: true
-          format: golang
-          flag-name: integration-${{ matrix.clickhouse }}
-  coverage:
-    needs:
-      - test
-      - testflows
-    name: coverage
-    runs-on: ubuntu-24.04
-    steps:
-      - name: Coveralls Finished
-        uses: coverallsapp/github-action@v2
-        with:
-          base-path: ./
-          parallel-finished: true
+      # todo return when resolve https://github.com/coverallsapp/coverage-reporter/issues/180
+      # - name: Report integration coverage
+      #   uses: coverallsapp/github-action@v2
+      #   with:
+      #     base-path: ./
+      #     file: test/integration/_coverage_/coverage.out
+      #     parallel: true
+      #     format: golang
+      #     flag-name: integration-${{ matrix.clickhouse }}
+  # coverage:
+  #   needs:
+  #     - test
+  #     - testflows
+  #   name: coverage
+  #   runs-on: ubuntu-24.04
+  #   steps:
+  #     - name: Coveralls Finished
+  #       uses: coverallsapp/github-action@v2
+  #       with:
+  #         base-path: ./
+  #         parallel-finished: true
   docker:
     needs:
       - test
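Note that the jobs above still produce Go coverage profiles (the `go tool covdata textfmt ... -o .../coverage.out` steps remain); only the Coveralls upload is commented out. While the upload is disabled, a total can still be computed locally with `go tool cover -func=coverage.out`, or with a small helper like the sketch below. The sketch is not part of the workflow; it assumes the standard golang.org/x/tools/cover package and the profile paths the jobs write.

    // coveragetotal: print total statement coverage from a Go text coverage profile.
    // Minimal sketch, not part of this repository's tooling.
    package main

    import (
        "fmt"
        "log"
        "os"

        "golang.org/x/tools/cover"
    )

    func main() {
        // Default to the path the unittest job writes; pass another profile as the first argument.
        profileFile := "_coverage_/coverage.out"
        if len(os.Args) > 1 {
            profileFile = os.Args[1]
        }
        profiles, err := cover.ParseProfiles(profileFile)
        if err != nil {
            log.Fatalf("cannot parse %s: %v", profileFile, err)
        }
        var total, covered int64
        for _, p := range profiles {
            for _, b := range p.Blocks {
                total += int64(b.NumStmt)
                if b.Count > 0 {
                    covered += int64(b.NumStmt)
                }
            }
        }
        if total == 0 {
            log.Fatal("no statements found in profile")
        }
        fmt.Printf("total statement coverage: %.1f%% (%d/%d statements)\n",
            100*float64(covered)/float64(total), covered, total)
    }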

ChangeLog.md (4 additions, 0 deletions)
@@ -1,3 +1,7 @@
+# v2.6.39
+BUG FIXES
+- final improvements for check `system.clusters` during restore `engine=Distributed` and is not exists and if not macros use `RESTORE_SCHEMA_ON_CLUSTER` or `CLICKHOUSE_RESTORE_DISTRIBUTED_CLUSTER` config parameter as cluster value, fix [1252](https://github.com/Altinity/clickhouse-backup/issues/1252)
+
 # v2.6.38
 BUG FIXES
 - check `system.clusters` during restore `engine=Distributed` and is not exists and if not macros use `CLICKHOUSE_RESTORE_DISTRIBUTED_CLUSTER` config parameter as cluster value, fix [1252](https://github.com/Altinity/clickhouse-backup/issues/1252)
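The new entry is easier to follow with a concrete illustration. The sketch below is hypothetical and is not clickhouse-backup's actual restore code; it only shows the kind of fallback the entry describes, where a cluster referenced by an `engine=Distributed` DDL that is neither a macro nor present in `system.clusters` is replaced by the `CLICKHOUSE_RESTORE_DISTRIBUTED_CLUSTER` or `RESTORE_SCHEMA_ON_CLUSTER` value (the relative precedence of the two settings is assumed here).

    // Hypothetical illustration of the cluster fallback described in the changelog entry.
    package main

    import "fmt"

    // resolveDistributedCluster picks the cluster name to use when recreating a Distributed table.
    func resolveDistributedCluster(ddlCluster string, knownClusters map[string]bool,
        restoreDistributedCluster, restoreSchemaOnCluster string) string {
        // Keep the original value when it is a macro such as "{cluster}".
        if len(ddlCluster) > 1 && ddlCluster[0] == '{' {
            return ddlCluster
        }
        // Keep it when the cluster still exists in system.clusters.
        if knownClusters[ddlCluster] {
            return ddlCluster
        }
        // Otherwise fall back to the configured values (ordering assumed for illustration).
        if restoreDistributedCluster != "" {
            return restoreDistributedCluster
        }
        if restoreSchemaOnCluster != "" {
            return restoreSchemaOnCluster
        }
        return ddlCluster
    }

    func main() {
        known := map[string]bool{"default": true} // pretend "new_cluster" was removed from config
        fmt.Println(resolveDistributedCluster("new_cluster", known, "{cluster}", ""))
        fmt.Println(resolveDistributedCluster("new_cluster", known, "", "{cluster}"))
    }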

test/integration/integration_test.go (35 additions, 26 deletions)
@@ -2911,37 +2911,46 @@ func TestRestoreDistributedCluster(t *testing.T) {
     backupName := "test_restore_distributed_cluster"
     env.DockerExecNoError(r, "clickhouse-backup", "clickhouse-backup", "create", "-c", "/etc/clickhouse-backup/config-s3.yml", "--tables="+dbName+".*", backupName)

-    // Get row count before dropping
-    var rowCount uint64
-    r.NoError(env.ch.SelectSingleRowNoCtx(&rowCount, "SELECT count() FROM "+dbName+"."+tableName+"_dist"))
-    r.Equal(uint64(100), rowCount)
+    testCases := []struct {
+        RestoreDistributedCluster string
+        RestoreSchemaOnCluster    string
+    }{
+        {"{cluster}", ""},
+        {"", "{cluster}"},
+    }
+    for _, tc := range testCases {
+        // Get row count before dropping
+        var rowCount uint64
+        r.NoError(env.ch.SelectSingleRowNoCtx(&rowCount, "SELECT count() FROM "+dbName+"."+tableName+"_dist"))
+        r.Equal(uint64(100), rowCount)

-    // Drop table and database
-    r.NoError(env.dropDatabase(dbName, false))
+        // Drop table and database
+        r.NoError(env.dropDatabase(dbName, false))

-    // remove cluster and wait configuration reload
-    env.DockerExecNoError(r, "clickhouse", "bash", "-c", "rm -rfv /etc/clickhouse-server/config.d/new-cluster.xml")
-    env.queryWithNoError(r, "SYSTEM RELOAD CONFIG")
-    newClusterExists := uint64(1)
-    for i := 0; i < 60 && newClusterExists == 1; i++ {
-        r.NoError(env.ch.SelectSingleRowNoCtx(&newClusterExists, "SELECT count() FROM system.clusters WHERE cluster='new_cluster'"))
-        if newClusterExists == 0 {
-            break
+        // remove cluster and wait configuration reload
+        env.DockerExecNoError(r, "clickhouse", "bash", "-c", "rm -rfv /etc/clickhouse-server/config.d/new-cluster.xml")
+        env.queryWithNoError(r, "SYSTEM RELOAD CONFIG")
+        newClusterExists := uint64(1)
+        for i := 0; i < 60 && newClusterExists == 1; i++ {
+            r.NoError(env.ch.SelectSingleRowNoCtx(&newClusterExists, "SELECT count() FROM system.clusters WHERE cluster='new_cluster'"))
+            if newClusterExists == 0 {
+                break
+            }
+            time.Sleep(1 * time.Second)
         }
-        time.Sleep(1 * time.Second)
-    }
-    r.Equal(uint64(0), newClusterExists)
+        r.Equal(uint64(0), newClusterExists)

-    // Restore using CLICKHOUSE_RESTORE_DISTRIBUTED_CLUSTER
-    env.DockerExecNoError(r, "clickhouse-backup", "bash", "-c", "RESTORE_SCHEMA_ON_CLUSTER='' CLICKHOUSE_RESTORE_DISTRIBUTED_CLUSTER={cluster} clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml restore "+backupName)
+        // Restore using `CLICKHOUSE_RESTORE_DISTRIBUTED_CLUSTER` and `RESTORE_SCHEMA_ON_CLUSTER`
+        env.DockerExecNoError(r, "clickhouse-backup", "bash", "-c", fmt.Sprintf("RESTORE_SCHEMA_ON_CLUSTER='%s' CLICKHOUSE_RESTORE_DISTRIBUTED_CLUSTER='%s' clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml restore %s", tc.RestoreSchemaOnCluster, tc.RestoreDistributedCluster, backupName))

-    // Verify data was restored correctly
-    r.NoError(env.ch.SelectSingleRowNoCtx(&rowCount, "SELECT count() FROM "+dbName+"."+tableName+"_dist"))
-    r.Equal(uint64(100), rowCount)
-    var tableDDL string
-    r.NoError(env.ch.SelectSingleRowNoCtx(&tableDDL, "SHOW CREATE TABLE "+dbName+"."+tableName+"_dist"))
-    r.NotContains(tableDDL, "new_cluster")
-    r.Contains(tableDDL, "Distributed('{cluster}'")
+        // Verify data was restored correctly
+        r.NoError(env.ch.SelectSingleRowNoCtx(&rowCount, "SELECT count() FROM "+dbName+"."+tableName+"_dist"))
+        r.Equal(uint64(100), rowCount)
+        var tableDDL string
+        r.NoError(env.ch.SelectSingleRowNoCtx(&tableDDL, "SHOW CREATE TABLE "+dbName+"."+tableName+"_dist"))
+        r.NotContains(tableDDL, "new_cluster")
+        r.Contains(tableDDL, "Distributed('{cluster}'")
+    }

     // Clean up
     r.NoError(env.dropDatabase(dbName, false))
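For readers skimming the diff: the change turns the single restore check into a table-driven loop that exercises both environment variables in turn. A stripped-down, self-contained sketch of that pattern follows. It is not the project's test harness; the env/r helpers are omitted and the restore command is only printed here, whereas the real test executes it inside the clickhouse-backup container.

    // Minimal sketch of the table-driven restore loop introduced by this commit.
    package main

    import "fmt"

    func main() {
        backupName := "test_restore_distributed_cluster"
        testCases := []struct {
            RestoreDistributedCluster string
            RestoreSchemaOnCluster    string
        }{
            {"{cluster}", ""}, // cluster name taken from CLICKHOUSE_RESTORE_DISTRIBUTED_CLUSTER
            {"", "{cluster}"}, // cluster name taken from RESTORE_SCHEMA_ON_CLUSTER
        }
        for _, tc := range testCases {
            // Build the same command string the test passes to the container shell.
            cmd := fmt.Sprintf(
                "RESTORE_SCHEMA_ON_CLUSTER='%s' CLICKHOUSE_RESTORE_DISTRIBUTED_CLUSTER='%s' clickhouse-backup -c /etc/clickhouse-backup/config-s3.yml restore %s",
                tc.RestoreSchemaOnCluster, tc.RestoreDistributedCluster, backupName,
            )
            fmt.Println(cmd)
        }
    }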
