Commit 37f6451

alexqyle and damnever authored
Fix S3 BucketWithRetries upload empty content issue (#5217)
* Implement grpc.Compressor.DecompressedSize for snappy to optimize memory allocations (#5213)
  Signed-off-by: Xiaochao Dong (@damnever) <[email protected]>
  Signed-off-by: Alex Le <[email protected]>
* Fix S3 BucketWithRetries upload empty content issue
  Signed-off-by: Alex Le <[email protected]>
* Update CHANGELOG
  Signed-off-by: Alex Le <[email protected]>
* Revert "Implement grpc.Compressor.DecompressedSize for snappy to optimize memory allocations (#5213)"
  This reverts commit 4821ba3.
  Signed-off-by: Alex Le <[email protected]>
* Only retry if input reader is seekable
  Signed-off-by: Alex Le <[email protected]>
* Rename mock type
  Signed-off-by: Alex Le <[email protected]>
* Add logging
  Signed-off-by: Alex Le <[email protected]>
* nit fixing
  Signed-off-by: Alex Le <[email protected]>
* add comment
  Signed-off-by: Alex Le <[email protected]>

---------

Signed-off-by: Xiaochao Dong (@damnever) <[email protected]>
Signed-off-by: Alex Le <[email protected]>
Co-authored-by: Xiaochao Dong <[email protected]>
1 parent 5779116 commit 37f6451
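
For context on the bug this commit fixes: a plain io.Reader can only be consumed once, so a retry wrapper that calls Upload again with the same reader sends zero bytes on every attempt after the first. The snippet below is a minimal standalone sketch of that failure mode and of the rewind-before-retry idea; the uploadTwice helper and its payloads are invented for illustration and are not part of the Cortex code.

package main

import (
    "bytes"
    "fmt"
    "io"
)

// uploadTwice is a hypothetical stand-in for a retrying upload: it drains the
// reader on two consecutive attempts, the way a naive retry loop would, and
// prints how many bytes each attempt actually saw.
func uploadTwice(r io.Reader) {
    for attempt := 1; attempt <= 2; attempt++ {
        // Rewind first when the reader supports it, mirroring this commit's
        // handling of io.ReadSeeker inputs.
        if rs, ok := r.(io.ReadSeeker); ok {
            if _, err := rs.Seek(0, io.SeekStart); err != nil {
                fmt.Println("seek failed:", err)
                return
            }
        }
        n, _ := io.Copy(io.Discard, r)
        fmt.Printf("attempt %d read %d bytes\n", attempt, n)
    }
}

func main() {
    // *bytes.Reader is seekable, so both attempts see the full payload.
    uploadTwice(bytes.NewReader([]byte("payload")))

    // *bytes.Buffer is not seekable: the second attempt reads 0 bytes, which
    // is the "empty content" symptom on retried S3 uploads.
    uploadTwice(bytes.NewBufferString("payload"))
}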

File tree

3 files changed: +142 -2 lines changed


CHANGELOG.md

Lines changed: 1 addition & 0 deletions
@@ -50,6 +50,7 @@
 * [BUGFIX] Query-frontend: Fix shardable instant queries do not produce sorted results for `sort`, `sort_desc`, `topk`, `bottomk` functions. #5148, #5170
 * [BUGFIX] Querier: Fix `/api/v1/series` returning 5XX instead of 4XX when limits are hit. #5169
 * [BUGFIX] Compactor: Fix issue that shuffle sharding planner return error if block is under visit by other compactor. #5188
+* [BUGFIX] Fix S3 BucketWithRetries upload empty content issue #5217
 * [FEATURE] Alertmanager: Add support for time_intervals. #5102
 
 ## 1.14.0 2022-12-02

pkg/storage/bucket/s3/bucket_client.go

Lines changed: 18 additions & 2 deletions
@@ -6,6 +6,7 @@ import (
     "time"
 
     "github.com/go-kit/log"
+    "github.com/go-kit/log/level"
     "github.com/prometheus/common/model"
     "github.com/thanos-io/objstore"
     "github.com/thanos-io/objstore/providers/s3"

@@ -29,6 +30,7 @@ func NewBucketClient(cfg Config, name string, logger log.Logger) (objstore.Bucke
         return nil, err
     }
     return &BucketWithRetries{
+        logger:           logger,
         bucket:           bucket,
         operationRetries: defaultOperationRetries,
         retryMinBackoff:  defaultRetryMinBackoff,

@@ -48,6 +50,7 @@ func NewBucketReaderClient(cfg Config, name string, logger log.Logger) (objstore
         return nil, err
     }
     return &BucketWithRetries{
+        logger:           logger,
         bucket:           bucket,
         operationRetries: defaultOperationRetries,
         retryMinBackoff:  defaultRetryMinBackoff,

@@ -92,6 +95,7 @@ func newS3Config(cfg Config) (s3.Config, error) {
 }
 
 type BucketWithRetries struct {
+    logger           log.Logger
     bucket           objstore.Bucket
     operationRetries int
     retryMinBackoff  time.Duration

@@ -115,7 +119,10 @@ func (b *BucketWithRetries) retry(ctx context.Context, f func() error) error {
         }
         retries.Wait()
     }
-    return lastErr
+    if lastErr != nil {
+        level.Error(b.logger).Log("msg", "bucket operation fail after retries", "err", lastErr)
+    }
+    return nil
 }
 
 func (b *BucketWithRetries) Name() string {

@@ -153,8 +160,17 @@ func (b *BucketWithRetries) Exists(ctx context.Context, name string) (exists boo
 }
 
 func (b *BucketWithRetries) Upload(ctx context.Context, name string, r io.Reader) error {
-    return b.retry(ctx, func() error {
+    rs, ok := r.(io.ReadSeeker)
+    if !ok {
+        // Skip retry if incoming Reader is not seekable to avoid
+        // loading entire content into memory
         return b.bucket.Upload(ctx, name, r)
+    }
+    return b.retry(ctx, func() error {
+        if _, err := rs.Seek(0, io.SeekStart); err != nil {
+            return err
+        }
+        return b.bucket.Upload(ctx, name, rs)
     })
 }
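
A usage note implied by the change above (a hedged reading of the diff, not separate documentation): retries now apply only when the caller hands Upload an io.ReadSeeker such as a *bytes.Reader or *os.File; any other reader gets a single attempt rather than being buffered in memory. A hypothetical caller that wants the retry behaviour for a small payload might therefore wrap it like this:

package example

import (
    "bytes"
    "context"

    "github.com/thanos-io/objstore"
)

// writeMarker is a hypothetical caller; the object name and payload handling
// are invented for illustration. Passing a *bytes.Reader (an io.ReadSeeker)
// lets BucketWithRetries rewind the content and retry on transient errors,
// whereas a streaming, non-seekable reader would get exactly one attempt.
func writeMarker(ctx context.Context, bkt objstore.Bucket, payload []byte) error {
    return bkt.Upload(ctx, "markers/cleanup-visit.json", bytes.NewReader(payload))
}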

Lines changed: 123 additions & 0 deletions
@@ -0,0 +1,123 @@
package s3

import (
    "bytes"
    "context"
    "fmt"
    "io"
    "testing"
    "time"

    "github.com/stretchr/testify/require"
    "github.com/thanos-io/objstore"
)

func TestBucketWithRetries_UploadSeekable(t *testing.T) {
    t.Parallel()

    m := mockBucket{
        FailCount: 3,
    }
    b := BucketWithRetries{
        bucket:           &m,
        operationRetries: 5,
        retryMinBackoff:  10 * time.Millisecond,
        retryMaxBackoff:  time.Second,
    }

    input := []byte("test input")
    err := b.Upload(context.Background(), "dummy", bytes.NewReader(input))
    require.NoError(t, err)
    require.Equal(t, input, m.uploadedContent)
}

func TestBucketWithRetries_UploadNonSeekable(t *testing.T) {
    t.Parallel()

    maxFailCount := 3
    m := mockBucket{
        FailCount: maxFailCount,
    }
    b := BucketWithRetries{
        bucket:           &m,
        operationRetries: 5,
        retryMinBackoff:  10 * time.Millisecond,
        retryMaxBackoff:  time.Second,
    }

    input := &fakeReader{}
    err := b.Upload(context.Background(), "dummy", input)
    require.Errorf(t, err, "empty byte slice")
    require.Equal(t, maxFailCount, m.FailCount)
}

type fakeReader struct {
}

func (f *fakeReader) Read(p []byte) (n int, err error) {
    return 0, fmt.Errorf("empty byte slice")
}

type mockBucket struct {
    FailCount       int
    uploadedContent []byte
}

// Upload mocks objstore.Bucket.Upload()
func (m *mockBucket) Upload(ctx context.Context, name string, r io.Reader) error {
    var buf bytes.Buffer
    if _, err := buf.ReadFrom(r); err != nil {
        return err
    }
    m.uploadedContent = buf.Bytes()
    if m.FailCount > 0 {
        m.FailCount--
        return fmt.Errorf("failed upload: %d", m.FailCount)
    }
    return nil
}

// Delete mocks objstore.Bucket.Delete()
func (m *mockBucket) Delete(ctx context.Context, name string) error {
    return nil
}

// Name mocks objstore.Bucket.Name()
func (m *mockBucket) Name() string {
    return "mock"
}

// Iter mocks objstore.Bucket.Iter()
func (m *mockBucket) Iter(ctx context.Context, dir string, f func(string) error, options ...objstore.IterOption) error {
    return nil
}

// Get mocks objstore.Bucket.Get()
func (m *mockBucket) Get(ctx context.Context, name string) (io.ReadCloser, error) {
    return nil, nil
}

// GetRange mocks objstore.Bucket.GetRange()
func (m *mockBucket) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) {
    return nil, nil
}

// Exists mocks objstore.Bucket.Exists()
func (m *mockBucket) Exists(ctx context.Context, name string) (bool, error) {
    return false, nil
}

// IsObjNotFoundErr mocks objstore.Bucket.IsObjNotFoundErr()
func (m *mockBucket) IsObjNotFoundErr(err error) bool {
    return false
}

// ObjectSize mocks objstore.Bucket.Attributes()
func (m *mockBucket) Attributes(ctx context.Context, name string) (objstore.ObjectAttributes, error) {
    return objstore.ObjectAttributes{Size: 0, LastModified: time.Now()}, nil
}

// Close mocks objstore.Bucket.Close()
func (m *mockBucket) Close() error {
    return nil
}
