
Commit 390e834

polishing after merge #1248, thanks @KimDoKy

Signed-off-by: Slach <[email protected]>

1 parent 4df8f8e

File tree

- ChangeLog.md
- pkg/storage/s3.go
- test/integration/run.sh

3 files changed: +5 −7 lines changed

ChangeLog.md

Lines changed: 1 addition & 0 deletions

@@ -2,6 +2,7 @@
 IMPROVEMENTS
 - add `GCS_SA_EMAIL` option to support service account authorization, fix [1246](https://github.com/Altinity/clickhouse-backup/pull/1246) thanks @kamushadenes
 - improve `--hardlink-exists-files` behavior, will look to exists local backups for the same part to avoid download unnecessary, fix [1244](https://github.com/Altinity/clickhouse-backup/issues/1244)
+- add `S3_CHUNK_SIZE` option to allow more flexible workload for S3-compatible remote storage, fix [1248](https://github.com/Altinity/clickhouse-backup/pull/1248) thanks @KimDoKy
 
 # v2.6.35
 BUG FIXES
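The new `S3_CHUNK_SIZE` option feeds the part-size selection reworked in pkg/storage/s3.go below. The following is a minimal, self-contained Go sketch of that selection; the standalone `choosePartSize` helper and its `main` are illustrative only, not the project's API (the real code reads `s.Config.ChunkSize` and `s.Config.MaxPartsCount` and clamps via `AdjustValueByRange`):

package main

import "fmt"

// choosePartSize mirrors the selection logic from pkg/storage/s3.go:
// honor a configured chunk size only while it keeps the upload under
// S3's 10,000-part limit, otherwise derive the part size from the
// object size and the maximum allowed part count.
func choosePartSize(size, chunkSize, maxPartsCount int64) int64 {
	var partSize int64
	if chunkSize > 0 && (size+chunkSize-1)/chunkSize < 10000 {
		partSize = chunkSize
	} else {
		partSize = size / maxPartsCount
		if size%maxPartsCount > 0 {
			partSize += max(1, (size%maxPartsCount)/maxPartsCount)
		}
	}
	// Clamp to S3's valid part-size range of 5 MiB to 5 GiB, as
	// AdjustValueByRange does in the real code.
	return max(5*1024*1024, min(partSize, 5*1024*1024*1024))
}

func main() {
	// A 100 GiB object with a configured 64 MiB chunk yields 1600 parts,
	// well under the 10,000-part limit, so the configured size is honored.
	fmt.Println(choosePartSize(100<<30, 64<<20, 10000)) // 67108864
}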

pkg/storage/s3.go

Lines changed: 3 additions & 6 deletions

@@ -262,13 +262,12 @@ func (s *S3) GetFileReaderWithLocalPath(ctx context.Context, key, localPath stri
 		// Use configured chunk size
 		partSize = s.Config.ChunkSize
 	} else {
-		partSize := remoteSize / s.Config.MaxPartsCount
+		partSize = remoteSize / s.Config.MaxPartsCount
 		if remoteSize%s.Config.MaxPartsCount > 0 {
 			partSize += max(1, (remoteSize%s.Config.MaxPartsCount)/s.Config.MaxPartsCount)
 		}
 	}
 	downloader.PartSize = AdjustValueByRange(partSize, 5*1024*1024, 5*1024*1024*1024)
-	log.Debug().Msgf("S3 Download PartSize: %d bytes (%.2f MB)", downloader.PartSize, float64(downloader.PartSize)/(1024*1024))
 
 	_, err = downloader.Download(ctx, writer, &s3.GetObjectInput{
 		Bucket: aws.String(s.Config.Bucket),
@@ -338,13 +337,12 @@ func (s *S3) PutFileAbsolute(ctx context.Context, key string, r io.ReadCloser, l
 	if s.Config.ChunkSize > 0 && (localSize+s.Config.ChunkSize-1)/s.Config.ChunkSize < 10000 {
 		partSize = s.Config.ChunkSize
 	} else {
-		partSize := localSize / s.Config.MaxPartsCount
+		partSize = localSize / s.Config.MaxPartsCount
 		if localSize%s.Config.MaxPartsCount > 0 {
 			partSize += max(1, (localSize%s.Config.MaxPartsCount)/s.Config.MaxPartsCount)
 		}
 	}
 	uploader.PartSize = AdjustValueByRange(partSize, 5*1024*1024, 5*1024*1024*1024)
-	log.Debug().Msgf("S3 Upload PartSize: %d bytes (%.2f MB)", uploader.PartSize, float64(uploader.PartSize)/(1024*1024))
 
 	_, err := uploader.Upload(ctx, &params)
 	return err
@@ -551,13 +549,12 @@ func (s *S3) CopyObject(ctx context.Context, srcSize int64, srcBucket, srcKey, d
 	if s.Config.ChunkSize > 0 && (srcSize+s.Config.ChunkSize-1)/s.Config.ChunkSize < 10000 {
 		partSize = s.Config.ChunkSize
 	} else {
-		partSize := srcSize / s.Config.MaxPartsCount
+		partSize = srcSize / s.Config.MaxPartsCount
 		if srcSize%s.Config.MaxPartsCount > 0 {
 			partSize += max(1, (srcSize%s.Config.MaxPartsCount)/s.Config.MaxPartsCount)
 		}
 	}
 	partSize = AdjustValueByRange(partSize, 128*1024*1024, 5*1024*1024*1024)
-	log.Debug().Msgf("S3 CopyObject PartSize: %d bytes (%.2f MB)", partSize, float64(partSize)/(1024*1024))
 
 	// Calculate the number of parts
 	numParts := (srcSize + partSize - 1) / partSize
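All three hunks above make the same one-character fix: the `:=` short declaration inside the `else` branch created a new `partSize` scoped to that block, so the outer `partSize` handed to `AdjustValueByRange` kept its zero value and was silently clamped up to the minimum bound, defeating the `MaxPartsCount`-based computation. A minimal sketch of the pitfall, with `clamp` as a hypothetical stand-in for `AdjustValueByRange`:

package main

import "fmt"

// clamp is a stand-in for AdjustValueByRange from the real code.
func clamp(v, lo, hi int64) int64 {
	return max(lo, min(v, hi))
}

func main() {
	const size, maxPartsCount int64 = 10 << 30, 100 // 10 GiB split into 100 parts
	var chunkSize int64                             // zero: no S3_CHUNK_SIZE configured

	var partSize int64
	if chunkSize > 0 {
		partSize = chunkSize
	} else {
		partSize := size / maxPartsCount // BUG: := declares a new, block-scoped partSize
		_ = partSize                     // the computed value is discarded here
	}
	// The outer partSize is still 0, so the clamp silently raises it
	// to the 5 MiB floor instead of the intended ~102 MiB.
	fmt.Println(clamp(partSize, 5<<20, 5<<30)) // 5242880

	partSize = size / maxPartsCount // the fix: plain assignment to the outer variable
	fmt.Println(clamp(partSize, 5<<20, 5<<30)) // 107374182
}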

test/integration/run.sh

Lines changed: 1 addition & 1 deletion

@@ -105,7 +105,7 @@ if [[ "0" == "${TEST_FAILED}" ]]; then
 	go tool covdata textfmt -i "${CUR_DIR}/_coverage_/" -o "${CUR_DIR}/_coverage_/coverage.out"
 fi
 
-if [[ "1" == "${CLEAN_AFTER:-0}" || "0" == "${TEST_FAILED}" ]]; then
+if [[ "0" == "${TEST_FAILED}" && "1" == "${CLEAN_AFTER:-1}" ]]; then
 	pids=()
 	for project in $(docker compose -f "${CUR_DIR}/${COMPOSE_FILE}" ls --all -q); do
 		docker compose -f "${CUR_DIR}/${COMPOSE_FILE}" --project-name "${project}" --progress plain down --remove-orphans --volumes --timeout=1 &
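Note the two semantic changes folded into this reordering: cleanup no longer runs after a failed test run even when `CLEAN_AFTER=1` (the compose projects stay up, presumably for inspection), and `CLEAN_AFTER` now defaults to `1`, so successful runs are cleaned up unless it is explicitly set to `0`.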
