
Commit d2e4b5d

change retries from constant to exponential backoff and add a RETRIES_JITTER configuration option, to avoid simultaneous retries from parallel operations
Signed-off-by: Slach <[email protected]>
1 parent 9a23295 commit d2e4b5d

12 files changed (+50, -30 lines)

ChangeLog.md

Lines changed: 4 additions & 0 deletions
@@ -1,3 +1,7 @@
+# v2.6.25
+IMPROVEMENTS
+- change retries from constant to exponential backoff and add RETRIES_JITTER configuration option, to avoid same time retries from parallel operation
+
 # v2.6.24
 IMPROVEMENTS
 - add logs for retries, to allow figure out with blackbaze2 s3 compatible provider, rate limit errors

ReadMe.md

Lines changed: 1 addition & 0 deletions
@@ -141,6 +141,7 @@ general:
 
 retries_on_failure: 3 # RETRIES_ON_FAILURE, how many times to retry after a failure during upload or download
 retries_pause: 5s # RETRIES_PAUSE, duration time to pause after each download or upload failure
+retries_jitter: 30 # RETRIES_JITTER, percent of RETRIES_PAUSE for jitter to avoid same time retries from parallel operations
 
 watch_interval: 1h # WATCH_INTERVAL, use only for `watch` command, backup will create every 1h
 full_interval: 24h # FULL_INTERVAL, use only for `watch` command, full backup will create every 24h
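
With the defaults shown here (retries_pause: 5s, retries_jitter: 30), each upload or download draws a base pause between 5s and 6.5s (the configured pause plus a random jitter of up to 30%), and the exponential backoff introduced by this commit then roughly doubles that pause after each further failure. Parallel operations that fail at the same moment therefore no longer retry in lockstep; the exact behaviour is defined by the AddRandomJitter helper added in pkg/common/common.go below.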

pkg/backup/create.go

Lines changed: 2 additions & 2 deletions
@@ -922,7 +922,7 @@ func (b *Backuper) uploadObjectDiskParts(ctx context.Context, backupName string,
 }
 dstKey := path.Join(backupName, disk.Name, storageObject.ObjectRelativePath)
 if !b.cfg.General.AllowObjectDiskStreaming {
-retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), b)
+retry := retrier.New(retrier.ExponentialBackoff(b.cfg.General.RetriesOnFailure, common.AddRandomJitter(b.cfg.General.RetriesDuration, b.cfg.General.RetriesJitter)), b)
 copyObjectErr = retry.RunCtx(uploadCtx, func(ctx context.Context) error {
 if objSize, err = b.dst.CopyObject(ctx, storageObject.ObjectSize, srcBucket, srcKey, dstKey); err != nil {
 return err
@@ -941,7 +941,7 @@ func (b *Backuper) uploadObjectDiskParts(ctx context.Context, backupName string,
 }
 }
 if isCopyFailed.Load() {
-retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), b)
+retry := retrier.New(retrier.ExponentialBackoff(b.cfg.General.RetriesOnFailure, common.AddRandomJitter(b.cfg.General.RetriesDuration, b.cfg.General.RetriesJitter)), b)
 copyObjectErr = retry.RunCtx(uploadCtx, func(ctx context.Context) error {
 return object_disk.CopyObjectStreaming(uploadCtx, srcDiskConnection.GetRemoteStorage(), b.dst, srcKey, path.Join(objectDiskPath, dstKey))
 })

pkg/backup/download.go

Lines changed: 8 additions & 9 deletions
@@ -439,7 +439,7 @@ func (b *Backuper) downloadTableMetadata(ctx context.Context, backupName string,
 }
 }
 var tmBody []byte
-retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), b)
+retry := retrier.New(retrier.ExponentialBackoff(b.cfg.General.RetriesOnFailure, common.AddRandomJitter(b.cfg.General.RetriesDuration, b.cfg.General.RetriesJitter)), b)
 err := retry.RunCtx(ctx, func(ctx context.Context) error {
 tmReader, err := b.dst.GetFileReader(ctx, remoteMetadataFile)
 if err != nil {
@@ -573,7 +573,7 @@ func (b *Backuper) downloadBackupRelatedDir(ctx context.Context, remoteBackup st
 var downloadErr error
 downloadedBytes := int64(0)
 if remoteBackup.DataFormat == DirectoryFormat {
-if downloadedBytes, downloadErr = b.dst.DownloadPath(ctx, remoteSource, localDir, b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration, b, b.cfg.General.DownloadMaxBytesPerSecond); downloadErr != nil {
+if downloadedBytes, downloadErr = b.dst.DownloadPath(ctx, remoteSource, localDir, b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration, b.cfg.General.RetriesJitter, b, b.cfg.General.DownloadMaxBytesPerSecond); downloadErr != nil {
 //SFTP can't walk on non exists paths and return error
 if !strings.Contains(downloadErr.Error(), "not exist") {
 return 0, downloadErr
@@ -592,7 +592,7 @@ func (b *Backuper) downloadBackupRelatedDir(ctx context.Context, remoteBackup st
 log.Debug().Msgf("%s not exists on remote storage, skip download", remoteSource)
 return 0, nil
 }
-retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), b)
+retry := retrier.New(retrier.ExponentialBackoff(b.cfg.General.RetriesOnFailure, common.AddRandomJitter(b.cfg.General.RetriesDuration, b.cfg.General.RetriesJitter)), b)
 err = retry.RunCtx(ctx, func(ctx context.Context) error {
 downloadedBytes, downloadErr = b.dst.DownloadCompressedStream(ctx, remoteSource, localDir, b.cfg.General.DownloadMaxBytesPerSecond)
 return downloadErr
@@ -643,7 +643,7 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata.
 }
 
 }
-retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), b)
+retry := retrier.New(retrier.ExponentialBackoff(b.cfg.General.RetriesOnFailure, common.AddRandomJitter(b.cfg.General.RetriesDuration, b.cfg.General.RetriesJitter)), b)
 var downloadedBytes int64
 var downloadErr error
 err := retry.RunCtx(dataCtx, func(dataCtx context.Context) error {
@@ -701,7 +701,7 @@ func (b *Backuper) downloadTableData(ctx context.Context, remoteBackup metadata.
 }
 }
 
-pathSize, downloadErr := b.dst.DownloadPath(dataCtx, partRemotePath, partLocalPath, b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration, b, b.cfg.General.DownloadMaxBytesPerSecond)
+pathSize, downloadErr := b.dst.DownloadPath(dataCtx, partRemotePath, partLocalPath, b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration, b.cfg.General.RetriesJitter, b, b.cfg.General.DownloadMaxBytesPerSecond)
 if downloadErr != nil {
 return downloadErr
 }
@@ -879,7 +879,7 @@ func (b *Backuper) downloadDiffRemoteFile(ctx context.Context, diffRemoteFilesLo
 namedLock.Lock()
 diffRemoteFilesLock.Unlock()
 if path.Ext(tableRemoteFile) != "" {
-retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), b)
+retry := retrier.New(retrier.ExponentialBackoff(b.cfg.General.RetriesOnFailure, common.AddRandomJitter(b.cfg.General.RetriesDuration, b.cfg.General.RetriesJitter)), b)
 err := retry.RunCtx(ctx, func(ctx context.Context) error {
 var downloadErr error
 downloadedBytes, downloadErr = b.dst.DownloadCompressedStream(ctx, tableRemoteFile, tableLocalDir, b.cfg.General.DownloadMaxBytesPerSecond)
@@ -891,7 +891,7 @@ func (b *Backuper) downloadDiffRemoteFile(ctx context.Context, diffRemoteFilesLo
 }
 } else {
 // remoteFile could be a directory
-if pathSize, err := b.dst.DownloadPath(ctx, tableRemoteFile, tableLocalDir, b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration, b, b.cfg.General.DownloadMaxBytesPerSecond); err != nil {
+if pathSize, err := b.dst.DownloadPath(ctx, tableRemoteFile, tableLocalDir, b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration, b.cfg.General.RetriesJitter, b, b.cfg.General.DownloadMaxBytesPerSecond); err != nil {
 log.Warn().Msgf("DownloadPath %s -> %s return error: %v", tableRemoteFile, tableLocalDir, err)
 return 0, err
 } else {
@@ -1122,8 +1122,7 @@ func (b *Backuper) downloadSingleBackupFile(ctx context.Context, remoteFile stri
 return size, nil
 }
 }
-retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), b)
-
+retry := retrier.New(retrier.ExponentialBackoff(b.cfg.General.RetriesOnFailure, common.AddRandomJitter(b.cfg.General.RetriesDuration, b.cfg.General.RetriesJitter)), b)
 err := retry.RunCtx(ctx, func(ctx context.Context) error {
 remoteReader, err := b.dst.GetFileReader(ctx, remoteFile)
 if err != nil {

pkg/backup/restore.go

Lines changed: 2 additions & 2 deletions
@@ -1657,7 +1657,7 @@ func (b *Backuper) downloadObjectDiskParts(ctx context.Context, backupName strin
 copiedSize := int64(0)
 var copyObjectErr error
 if !b.cfg.General.AllowObjectDiskStreaming {
-retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), b)
+retry := retrier.New(retrier.ExponentialBackoff(b.cfg.General.RetriesOnFailure, common.AddRandomJitter(b.cfg.General.RetriesDuration, b.cfg.General.RetriesJitter)), b)
 copyObjectErr = retry.RunCtx(downloadCtx, func(ctx context.Context) error {
 var retryErr error
 copiedSize, retryErr = object_disk.CopyObject(downloadCtx, dstDiskName, storageObject.ObjectSize, srcBucket, srcKey, storageObject.ObjectRelativePath)
@@ -1684,7 +1684,7 @@ func (b *Backuper) downloadObjectDiskParts(ctx context.Context, backupName strin
 }
 dstStorage := dstConnection.GetRemoteStorage()
 dstKey := path.Join(dstConnection.GetRemoteObjectDiskPath(), storageObject.ObjectRelativePath)
-retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), b)
+retry := retrier.New(retrier.ExponentialBackoff(b.cfg.General.RetriesOnFailure, common.AddRandomJitter(b.cfg.General.RetriesDuration, b.cfg.General.RetriesJitter)), b)
 copyObjectErr = retry.RunCtx(downloadCtx, func(ctx context.Context) error {
 return object_disk.CopyObjectStreaming(downloadCtx, srcStorage, dstStorage, srcKey, dstKey)
 })

pkg/backup/upload.go

Lines changed: 10 additions & 10 deletions
@@ -235,7 +235,7 @@ func (b *Backuper) Upload(backupName string, deleteSource bool, diffFrom, diffFr
 }
 remoteBackupMetaFile := path.Join(backupName, "metadata.json")
 if !b.resume || (b.resume && !b.resumableState.IsAlreadyProcessedBool(remoteBackupMetaFile)) {
-retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), b)
+retry := retrier.New(retrier.ExponentialBackoff(b.cfg.General.RetriesOnFailure, common.AddRandomJitter(b.cfg.General.RetriesDuration, b.cfg.General.RetriesJitter)), b)
 err = retry.RunCtx(ctx, func(ctx context.Context) error {
 return b.dst.PutFile(ctx, remoteBackupMetaFile, io.NopCloser(bytes.NewReader(newBackupMetadataBody)), 0)
 })
@@ -324,7 +324,7 @@ func (b *Backuper) uploadSingleBackupFile(ctx context.Context, localFile, remote
 log.Warn().Msgf("can't close %v: %v", f, err)
 }
 }()
-retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), b)
+retry := retrier.New(retrier.ExponentialBackoff(b.cfg.General.RetriesOnFailure, common.AddRandomJitter(b.cfg.General.RetriesDuration, b.cfg.General.RetriesJitter)), b)
 err = retry.RunCtx(ctx, func(ctx context.Context) error {
 return b.dst.PutFile(ctx, remoteFile, f, 0)
 })
@@ -460,15 +460,15 @@ func (b *Backuper) uploadBackupRelatedDir(ctx context.Context, localBackupRelate
 }
 if b.cfg.GetCompressionFormat() == "none" {
 remoteUploadedBytes := int64(0)
-if remoteUploadedBytes, err = b.dst.UploadPath(ctx, localBackupRelatedDir, localFiles, destinationRemote, b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration, b, b.cfg.General.UploadMaxBytesPerSecond); err != nil {
+if remoteUploadedBytes, err = b.dst.UploadPath(ctx, localBackupRelatedDir, localFiles, destinationRemote, b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration, b.cfg.General.RetriesJitter, b, b.cfg.General.UploadMaxBytesPerSecond); err != nil {
 return 0, fmt.Errorf("can't RBAC or config upload %s: %v", destinationRemote, err)
 }
 if b.resume {
 b.resumableState.AppendToState(destinationRemote, remoteUploadedBytes)
 }
 return uint64(remoteUploadedBytes), nil
 }
-retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), b)
+retry := retrier.New(retrier.ExponentialBackoff(b.cfg.General.RetriesOnFailure, common.AddRandomJitter(b.cfg.General.RetriesDuration, b.cfg.General.RetriesJitter)), b)
 err = retry.RunCtx(ctx, func(ctx context.Context) error {
 return b.dst.UploadCompressedStream(ctx, localBackupRelatedDir, localFiles, destinationRemote, b.cfg.General.UploadMaxBytesPerSecond)
 })
@@ -477,7 +477,7 @@ func (b *Backuper) uploadBackupRelatedDir(ctx context.Context, localBackupRelate
 }
 
 var remoteUploaded storage.RemoteFile
-retry = retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), b)
+retry = retrier.New(retrier.ExponentialBackoff(b.cfg.General.RetriesOnFailure, common.AddRandomJitter(b.cfg.General.RetriesDuration, b.cfg.General.RetriesJitter)), b)
 err = retry.RunCtx(ctx, func(ctx context.Context) error {
 remoteUploaded, err = b.dst.StatFile(ctx, destinationRemote)
 return err
@@ -553,7 +553,7 @@ func (b *Backuper) uploadTableData(ctx context.Context, backupName string, delet
 }
 }
 log.Debug().Msgf("start upload %d files to %s", len(partFiles), remotePath)
-if uploadPathBytes, err := b.dst.UploadPath(ctx, backupPath, partFiles, remotePath, b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration, b, b.cfg.General.UploadMaxBytesPerSecond); err != nil {
+if uploadPathBytes, err := b.dst.UploadPath(ctx, backupPath, partFiles, remotePath, b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration, b.cfg.General.RetriesJitter, b, b.cfg.General.UploadMaxBytesPerSecond); err != nil {
 log.Error().Msgf("UploadPath return error: %v", err)
 return fmt.Errorf("can't upload: %v", err)
 } else {
@@ -585,7 +585,7 @@ func (b *Backuper) uploadTableData(ctx context.Context, backupName string, delet
 }
 }
 log.Debug().Msgf("start upload %d files to %s", len(localFiles), remoteDataFile)
-retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), b)
+retry := retrier.New(retrier.ExponentialBackoff(b.cfg.General.RetriesOnFailure, common.AddRandomJitter(b.cfg.General.RetriesDuration, b.cfg.General.RetriesJitter)), b)
 err := retry.RunCtx(ctx, func(ctx context.Context) error {
 return b.dst.UploadCompressedStream(ctx, backupPath, localFiles, remoteDataFile, b.cfg.General.UploadMaxBytesPerSecond)
 })
@@ -595,7 +595,7 @@ func (b *Backuper) uploadTableData(ctx context.Context, backupName string, delet
 }
 
 var remoteFile storage.RemoteFile
-retry = retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), b)
+retry = retrier.New(retrier.ExponentialBackoff(b.cfg.General.RetriesOnFailure, common.AddRandomJitter(b.cfg.General.RetriesDuration, b.cfg.General.RetriesJitter)), b)
 err = retry.RunCtx(ctx, func(ctx context.Context) error {
 remoteFile, err = b.dst.StatFile(ctx, remoteDataFile)
 return err
@@ -651,7 +651,7 @@ func (b *Backuper) uploadTableMetadataRegular(ctx context.Context, backupName st
 return processedSize, nil
 }
 }
-retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), b)
+retry := retrier.New(retrier.ExponentialBackoff(b.cfg.General.RetriesOnFailure, common.AddRandomJitter(b.cfg.General.RetriesDuration, b.cfg.General.RetriesJitter)), b)
 err = retry.RunCtx(ctx, func(ctx context.Context) error {
 return b.dst.PutFile(ctx, remoteTableMetaFile, io.NopCloser(bytes.NewReader(content)), 0)
 })
@@ -696,7 +696,7 @@ func (b *Backuper) uploadTableMetadataEmbedded(ctx context.Context, backupName s
 log.Warn().Msgf("can't close %v: %v", localReader, err)
 }
 }()
-retry := retrier.New(retrier.ConstantBackoff(b.cfg.General.RetriesOnFailure, b.cfg.General.RetriesDuration), b)
+retry := retrier.New(retrier.ExponentialBackoff(b.cfg.General.RetriesOnFailure, common.AddRandomJitter(b.cfg.General.RetriesDuration, b.cfg.General.RetriesJitter)), b)
 err = retry.RunCtx(ctx, func(ctx context.Context) error {
 return b.dst.PutFile(ctx, remoteTableMetaFile, localReader, 0)
 })

pkg/common/common.go

Lines changed: 11 additions & 0 deletions
@@ -1,9 +1,11 @@
 package common
 
 import (
+"math/rand"
 "net/url"
 "reflect"
 "strings"
+"time"
 )
 
 func TablePathEncode(str string) string {
@@ -108,3 +110,12 @@ func deepEqual(a, b interface{}) bool {
 // In all other cases, use reflect.DeepEqual
 return reflect.DeepEqual(a, b)
 }
+
+func AddRandomJitter(duration time.Duration, jitterPercent int8) time.Duration {
+if jitterPercent <= 0 {
+return duration
+}
+maxJitter := duration * time.Duration(jitterPercent) / 100
+jitter := time.Duration(rand.Int63n(int64(maxJitter + 1)))
+return duration + jitter
+}
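
Read together with the call sites above: the jittered base pause is computed once when each retrier is built, and go-resiliency's ExponentialBackoff turns it into a schedule that doubles the pause after every failed attempt, so concurrent operations back off on slightly different timelines. Below is a minimal, standalone sketch of that combination (not project code): the constants stand in for RETRIES_ON_FAILURE, RETRIES_PAUSE and RETRIES_JITTER, the local AddRandomJitter mirrors the helper added above, and a nil classifier is used where the real call sites pass the Backuper or a retrierClassifier.

package main

import (
	"context"
	"fmt"
	"math/rand"
	"time"

	"github.com/eapache/go-resiliency/retrier"
)

// AddRandomJitter mirrors the helper added in pkg/common/common.go: it stretches
// the base pause by a random amount of up to jitterPercent percent.
func AddRandomJitter(duration time.Duration, jitterPercent int8) time.Duration {
	if jitterPercent <= 0 {
		return duration
	}
	maxJitter := duration * time.Duration(jitterPercent) / 100
	return duration + time.Duration(rand.Int63n(int64(maxJitter+1)))
}

func main() {
	// Stand-ins for RETRIES_ON_FAILURE, RETRIES_PAUSE and RETRIES_JITTER.
	retriesOnFailure := 3
	retriesPause := 5 * time.Second
	retriesJitter := int8(30)

	// The jittered base pause is drawn once per retrier; ExponentialBackoff
	// then doubles it after each failed attempt.
	base := AddRandomJitter(retriesPause, retriesJitter)
	fmt.Println("jittered base pause:", base)

	// nil classifier: every returned error is treated as retryable.
	retry := retrier.New(retrier.ExponentialBackoff(retriesOnFailure, base), nil)
	attempt := 0
	err := retry.RunCtx(context.Background(), func(ctx context.Context) error {
		attempt++
		fmt.Println("attempt", attempt)
		return fmt.Errorf("simulated transient failure") // force a retry with backoff
	})
	fmt.Println("final error after retries:", err)
}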

pkg/config/config.go

Lines changed: 1 addition & 0 deletions
@@ -59,6 +59,7 @@ type GeneralConfig struct {
 RestoreTableMapping map[string]string `yaml:"restore_table_mapping" envconfig:"RESTORE_TABLE_MAPPING"`
 RetriesOnFailure int `yaml:"retries_on_failure" envconfig:"RETRIES_ON_FAILURE"`
 RetriesPause string `yaml:"retries_pause" envconfig:"RETRIES_PAUSE"`
+RetriesJitter int8 `yaml:"retries_jitter" envconfig:"RETRIES_JITTER"`
 WatchInterval string `yaml:"watch_interval" envconfig:"WATCH_INTERVAL"`
 FullInterval string `yaml:"full_interval" envconfig:"FULL_INTERVAL"`
 WatchBackupNameTemplate string `yaml:"watch_backup_name_template" envconfig:"WATCH_BACKUP_NAME_TEMPLATE"`

pkg/custom/download_custom.go

Lines changed: 2 additions & 1 deletion
@@ -3,6 +3,7 @@ package custom
 import (
 "context"
 "fmt"
+"github.com/Altinity/clickhouse-backup/v2/pkg/common"
 "github.com/Altinity/clickhouse-backup/v2/pkg/config"
 "github.com/Altinity/clickhouse-backup/v2/pkg/utils"
 "github.com/eapache/go-resiliency/retrier"
@@ -34,7 +35,7 @@ func Download(ctx context.Context, retrierClassifier retrier.Classifier, cfg *co
 "schema": schemaOnly,
 }
 args := ApplyCommandTemplate(cfg.Custom.DownloadCommand, templateData)
-retry := retrier.New(retrier.ConstantBackoff(cfg.General.RetriesOnFailure, cfg.General.RetriesDuration), retrierClassifier)
+retry := retrier.New(retrier.ExponentialBackoff(cfg.General.RetriesOnFailure, common.AddRandomJitter(cfg.General.RetriesDuration, cfg.General.RetriesJitter)), retrierClassifier)
 err := retry.RunCtx(ctx, func(ctx context.Context) error {
 return utils.ExecCmd(ctx, cfg.Custom.CommandTimeoutDuration, args[0], args[1:]...)
 })

pkg/custom/upload_custom.go

Lines changed: 2 additions & 1 deletion
@@ -3,6 +3,7 @@ package custom
 import (
 "context"
 "fmt"
+"github.com/Altinity/clickhouse-backup/v2/pkg/common"
 "github.com/Altinity/clickhouse-backup/v2/pkg/config"
 "github.com/Altinity/clickhouse-backup/v2/pkg/utils"
 "github.com/eapache/go-resiliency/retrier"
@@ -40,7 +41,7 @@ func Upload(ctx context.Context, retrierClassifier retrier.Classifier, cfg *conf
 "schema": schemaOnly,
 }
 args := ApplyCommandTemplate(cfg.Custom.UploadCommand, templateData)
-retry := retrier.New(retrier.ConstantBackoff(cfg.General.RetriesOnFailure, cfg.General.RetriesDuration), retrierClassifier)
+retry := retrier.New(retrier.ExponentialBackoff(cfg.General.RetriesOnFailure, common.AddRandomJitter(cfg.General.RetriesDuration, cfg.General.RetriesJitter)), retrierClassifier)
 err := retry.RunCtx(ctx, func(ctx context.Context) error {
 return utils.ExecCmd(ctx, cfg.Custom.CommandTimeoutDuration, args[0], args[1:]...)
 })
