Skip to content

Commit e64ebe3

Browse files
committed
Next read after reading the full file returns EOF
GODRIVER-1013: Fix bug in DownloadStream and add tabular RoundTrip tests that exercise these bugs. Change-Id: I73777b5b422fdf0bfdee344fb08c2f686940ccd8
1 parent 3aab4a2 commit e64ebe3

File tree

2 files changed

+82
-39
lines changed

2 files changed

+82
-39
lines changed

mongo/gridfs/download_stream.go

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -8,9 +8,7 @@ package gridfs
88

99
import (
1010
"context"
11-
1211
"errors"
13-
1412
"io"
1513
"math"
1614
"time"
@@ -91,13 +89,16 @@ func (ds *DownloadStream) Read(p []byte) (int, error) {
9189

9290
bytesCopied := 0
9391
var err error
94-
9592
for bytesCopied < len(p) {
9693
if ds.bufferStart >= ds.bufferEnd {
9794
// Buffer is empty and can load in data from new chunk.
9895
err = ds.fillBuffer(ctx)
9996
if err != nil {
10097
if err == errNoMoreChunks {
98+
if bytesCopied == 0 {
99+
ds.done = true
100+
return 0, io.EOF
101+
}
101102
return bytesCopied, nil
102103
}
103104
return bytesCopied, err

mongo/gridfs/gridfs_test.go

Lines changed: 78 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -190,51 +190,93 @@ func TestGridFS(t *testing.T) {
190190
})
191191

192192
t.Run("RoundTrip", func(t *testing.T) {
193-
if !canRunRoundTripTest(t, db) {
194-
t.Skip()
195-
}
196193

197-
bucket, err := NewBucket(db, nil)
198-
if err != nil {
199-
t.Fatalf("Failed to create bucket: %v", err)
194+
oneMeg := 1024 * 1024
195+
smallBuffSize := 100
196+
197+
tests := []struct {
198+
name string
199+
chunkSize int // make -1 for no capacity for no chunkSize
200+
fileSize int
201+
bufSize int // make -1 for no capacity for no bufSize
202+
}{
203+
{"RoundTrip: original", -1, UploadBufferSize, -1},
204+
{"RoundTrip: chunk size multiple of file", oneMeg, UploadBufferSize, -1},
205+
{"RoundTrip: chunk size is file size", oneMeg, oneMeg, -1},
206+
{"RoundTrip: chunk size multiple of file size and with strict buffer size", oneMeg, UploadBufferSize, smallBuffSize},
207+
{"RoundTrip: chunk size multiple of file size and buffer size", oneMeg, UploadBufferSize, UploadBufferSize},
208+
{"RoundTrip: chunk size, file size, buffer size all the same", oneMeg, oneMeg, oneMeg},
200209
}
201210

202-
timeout := 5 * time.Second
203-
if israce.Enabled {
204-
timeout = 20 * time.Second // race detector causes 2-20x slowdown
205-
}
211+
for _, test := range tests {
206212

207-
err = bucket.SetWriteDeadline(time.Now().Add(timeout))
208-
if err != nil {
209-
t.Fatalf("Failed to set write deadline: %v", err)
210-
}
211-
err = bucket.Drop()
212-
if err != nil {
213-
t.Fatalf("Drop failed: %v", err)
214-
}
213+
t.Run(test.name, func(t *testing.T) {
215214

216-
// Test that Upload works when the buffer to write is longer than the upload stream's internal buffer.
217-
// This requires multiple calls to uploadChunks.
218-
size := UploadBufferSize + 1000000
219-
p := make([]byte, size)
220-
for i := 0; i < size; i++ {
221-
p[i] = byte(rand.Intn(100))
222-
}
215+
if !canRunRoundTripTest(t, db) {
216+
t.Skip()
217+
}
223218

224-
_, err = bucket.UploadFromStream("filename", bytes.NewReader(p))
225-
if err != nil {
226-
t.Fatalf("Upload failed: %v", err)
227-
}
219+
var chunkSize *int32
220+
var temp int32
221+
if test.chunkSize != -1 {
222+
temp = int32(test.chunkSize)
223+
chunkSize = &temp
224+
}
228225

229-
w := bytes.NewBuffer(make([]byte, 0))
230-
_, err = bucket.DownloadToStreamByName("filename", w)
231-
if err != nil {
232-
t.Fatalf("Download failed: %v", err)
233-
}
226+
bucket, err := NewBucket(db, &options.BucketOptions{
227+
ChunkSizeBytes: chunkSize,
228+
})
229+
if err != nil {
230+
t.Fatalf("Failed to create bucket: %v", err)
231+
}
232+
233+
timeout := 5 * time.Second
234+
if israce.Enabled {
235+
timeout = 20 * time.Second // race detector causes 2-20x slowdown
236+
}
237+
238+
err = bucket.SetWriteDeadline(time.Now().Add(timeout))
239+
if err != nil {
240+
t.Fatalf("Failed to set write deadline: %v", err)
241+
}
242+
err = bucket.Drop()
243+
if err != nil {
244+
t.Fatalf("Drop failed: %v", err)
245+
}
246+
247+
// Test that Upload works when the buffer to write is longer than the upload stream's internal buffer.
248+
// This requires multiple calls to uploadChunks.
249+
size := test.fileSize
250+
p := make([]byte, size)
251+
for i := 0; i < size; i++ {
252+
p[i] = byte(rand.Intn(100))
253+
}
254+
255+
_, err = bucket.UploadFromStream("filename", bytes.NewReader(p))
256+
if err != nil {
257+
t.Fatalf("Upload failed: %v", err)
258+
}
259+
260+
var w *bytes.Buffer
261+
if test.bufSize == -1 {
262+
w = bytes.NewBuffer(make([]byte, 0))
263+
} else {
264+
w = bytes.NewBuffer(make([]byte, 0, test.bufSize))
265+
}
266+
267+
_, err = bucket.DownloadToStreamByName("filename", w)
268+
if err != nil {
269+
t.Fatalf("Download failed: %v", err)
270+
}
271+
272+
if !bytes.Equal(p, w.Bytes()) {
273+
t.Errorf("Downloaded file did not match p.")
274+
}
275+
276+
})
234277

235-
if !bytes.Equal(p, w.Bytes()) {
236-
t.Errorf("Downloaded file did not match p.")
237278
}
279+
238280
})
239281

240282
err = client.Disconnect(ctx)

0 commit comments

Comments (0)