@@ -15,6 +15,7 @@ import (
 	"io/ioutil"
 	"log"
 	"os"
+	"strings"
 	"sync"
 	"sync/atomic"
 	"testing"
@@ -76,7 +77,7 @@ func TestPutBulkBlobSpanningChunksRandomAccess(t *testing.T) {
 
 	strategy := newTestTransferStrategy(t)
 
-	writeObj, err := getTestWriteObjectRandomAccess(LargeBookTitle, LargeBookPath + LargeBookTitle)
+	writeObj, err := getTestWriteObjectRandomAccess(LargeBookTitle, LargeBookPath + LargeBookTitle)
 
 	var writeObjects []helperModels.PutObject
 	writeObjects = append(writeObjects, *writeObj)
@@ -89,8 +90,7 @@ func TestPutBulkBlobSpanningChunksRandomAccess(t *testing.T) {
 		t.Error("expected to get a BP job ID, but instead got nothing")
 	}
 
-
-	testutils.VerifyFilesOnBP(t, testBucket, []string{LargeBookTitle}, LargeBookPath, client)
+	testutils.VerifyFilesOnBP(t, testBucket, []string{LargeBookTitle}, LargeBookPath, client)
 }
 
 func TestPutBulkBlobSpanningChunksStreamAccess(t *testing.T) {
@@ -100,7 +100,7 @@ func TestPutBulkBlobSpanningChunksStreamAccess(t *testing.T) {
 
 	strategy := newTestTransferStrategy(t)
 
-	writeObj, err := getTestWriteObjectStreamAccess(LargeBookTitle, LargeBookPath + LargeBookTitle)
+	writeObj, err := getTestWriteObjectStreamAccess(LargeBookTitle, LargeBookPath + LargeBookTitle)
 
 	var writeObjects []helperModels.PutObject
 	writeObjects = append(writeObjects, *writeObj)
@@ -113,7 +113,7 @@ func TestPutBulkBlobSpanningChunksStreamAccess(t *testing.T) {
 		t.Error("expected to get a BP job ID, but instead got nothing")
 	}
 
-	testutils.VerifyFilesOnBP(t, testBucket, []string{LargeBookTitle}, LargeBookPath, client)
+	testutils.VerifyFilesOnBP(t, testBucket, []string{LargeBookTitle}, LargeBookPath, client)
 }
 
 // GOSDK-26: On archive helpers can hang indefinitely using streaming strategy if blob fails.
@@ -136,13 +136,13 @@ func TestPutBulkBlobSpanningChunksStreamAccessDoesNotExist(t *testing.T) {
 	strategy := helpers.WriteTransferStrategy{
 		BlobStrategy: newTestBlobStrategy(),
 		Options: helpers.WriteBulkJobOptions{MaxUploadSize: &helpers.MinUploadSize},
-		Listeners: helpers.ListenerStrategy{ErrorCallback:errorCallback},
+		Listeners: helpers.ListenerStrategy{ErrorCallback: errorCallback},
 	}
 
 	// open a file but lie that it's bigger than it is
 	f, err := os.Open(testutils.BookPath + testutils.BookTitles[0])
 	writeObj := helperModels.PutObject{
-		PutObject: ds3Models.Ds3PutObject{Name: LargeBookTitle, Size: 20 * 1024 * 1024},
+		PutObject: ds3Models.Ds3PutObject{Name: LargeBookTitle, Size: 20 * 1024 * 1024},
 		ChannelBuilder: &testStreamAccessReadChannelBuilder{f: f},
 	}
@@ -164,9 +164,9 @@ func TestGetBulk(t *testing.T) {
 	helper := helpers.NewHelpers(client)
 
 	strategy := helpers.ReadTransferStrategy{
-		Options: helpers.ReadBulkJobOptions{}, // use default job options
+		Options: helpers.ReadBulkJobOptions{}, // use default job options
 		BlobStrategy: newTestBlobStrategy(),
-		Listeners: newErrorOnErrorListenerStrategy(t),
+		Listeners: newErrorOnErrorListenerStrategy(t),
 	}
 
 	file0, err := ioutil.TempFile(os.TempDir(), "goTest")
@@ -189,7 +189,7 @@ func TestGetBulk(t *testing.T) {
 	defer file3.Close()
 	defer os.Remove(file3.Name())
 
-	readObjects := []helperModels.GetObject{
+	readObjects := []helperModels.GetObject{
 		{Name: testutils.BookTitles[0], ChannelBuilder: channels.NewWriteChannelBuilder(file0.Name())},
 		{Name: testutils.BookTitles[1], ChannelBuilder: channels.NewWriteChannelBuilder(file1.Name())},
 		{Name: testutils.BookTitles[2], ChannelBuilder: channels.NewWriteChannelBuilder(file2.Name())},
@@ -216,9 +216,9 @@ func TestGetBulkBlobSpanningChunksRandomAccess(t *testing.T) {
 	helper := helpers.NewHelpers(client)
 
 	strategy := helpers.ReadTransferStrategy{
-		Options: helpers.ReadBulkJobOptions{}, // use default job options
+		Options: helpers.ReadBulkJobOptions{}, // use default job options
 		BlobStrategy: newTestBlobStrategy(),
-		Listeners: newErrorOnErrorListenerStrategy(t),
+		Listeners: newErrorOnErrorListenerStrategy(t),
 	}
 
 	file, err := ioutil.TempFile(os.TempDir(), "goTest")
@@ -248,17 +248,17 @@ func TestGetBulkBlobSpanningChunksStreaming(t *testing.T) {
 	helper := helpers.NewHelpers(client)
 
 	strategy := helpers.ReadTransferStrategy{
-		Options: helpers.ReadBulkJobOptions{}, // use default job options
+		Options: helpers.ReadBulkJobOptions{}, // use default job options
 		BlobStrategy: newTestBlobStrategy(),
-		Listeners: newErrorOnErrorListenerStrategy(t),
+		Listeners: newErrorOnErrorListenerStrategy(t),
 	}
 
 	file, err := ioutil.TempFile(os.TempDir(), "goTest")
 	ds3Testing.AssertNilError(t, err)
 	defer file.Close()
 	defer os.Remove(file.Name())
 
-	fileWriter, err := os.OpenFile(file.Name(), os.O_WRONLY | os.O_CREATE, os.ModePerm)
+	fileWriter, err := os.OpenFile(file.Name(), os.O_WRONLY | os.O_CREATE, os.ModePerm)
 	defer fileWriter.Close()
 
 	readObjects := []helperModels.GetObject{
@@ -293,7 +293,7 @@ func TestGetBulkBlobSpanningChunksStreamingFailBlob(t *testing.T) {
 	}
 
 	strategy := helpers.ReadTransferStrategy{
-		Options: helpers.ReadBulkJobOptions{}, // use default job options
+		Options: helpers.ReadBulkJobOptions{}, // use default job options
 		BlobStrategy: newTestBlobStrategy(),
 		Listeners: helpers.ListenerStrategy{
 			ErrorCallback: errorCallback,
@@ -322,17 +322,17 @@ func TestGetBulkPartialObjectRandomAccess(t *testing.T) {
 	helper := helpers.NewHelpers(client)
 
 	strategy := helpers.ReadTransferStrategy{
-		Options: helpers.ReadBulkJobOptions{}, // use default job options
+		Options: helpers.ReadBulkJobOptions{}, // use default job options
 		BlobStrategy: newTestBlobStrategy(),
-		Listeners: newErrorOnErrorListenerStrategy(t),
+		Listeners: newErrorOnErrorListenerStrategy(t),
 	}
 
 	file, err := ioutil.TempFile(os.TempDir(), "goTest")
 	ds3Testing.AssertNilError(t, err)
 	defer file.Close()
 	defer os.Remove(file.Name())
 
-	ranges := []ds3Models.Range{
+	ranges := []ds3Models.Range{
 		{0, 100},
 		{200, 300},
 		{301, 400},
@@ -350,9 +350,9 @@ func TestGetBulkPartialObjectRandomAccess(t *testing.T) {
 	}
 
 	file.Seek(0, io.SeekStart)
-	testutils.VerifyPartialFile(t, LargeBookPath + LargeBookTitle, 101, 0, file)
-	testutils.VerifyPartialFile(t, LargeBookPath + LargeBookTitle, 201, 200, file)
-	testutils.VerifyPartialFile(t, LargeBookPath + LargeBookTitle, 101, 500, file)
+	testutils.VerifyPartialFile(t, LargeBookPath + LargeBookTitle, 101, 0, file)
+	testutils.VerifyPartialFile(t, LargeBookPath + LargeBookTitle, 201, 200, file)
+	testutils.VerifyPartialFile(t, LargeBookPath + LargeBookTitle, 101, 500, file)
 }
 
 func TestPutObjectDoesNotExist(t *testing.T) {
@@ -374,16 +374,16 @@ func TestPutObjectDoesNotExist(t *testing.T) {
 	strategy := helpers.WriteTransferStrategy{
 		BlobStrategy: newTestBlobStrategy(),
 		Options: helpers.WriteBulkJobOptions{MaxUploadSize: &helpers.MinUploadSize},
-		Listeners: helpers.ListenerStrategy{ErrorCallback:errorCallback},
+		Listeners: helpers.ListenerStrategy{ErrorCallback: errorCallback},
 	}
 
 	channelBuilder := channels.NewReadChannelBuilder(testObjectName)
 	nonExistentPutObj := helperModels.PutObject{
-		PutObject: ds3Models.Ds3PutObject{Name:testObjectName,Size:10},
+		PutObject: ds3Models.Ds3PutObject{Name: testObjectName, Size: 10},
 		ChannelBuilder: channelBuilder,
 	}
 
-	writeObjects := []helperModels.PutObject{nonExistentPutObj}
+	writeObjects := []helperModels.PutObject{nonExistentPutObj}
 
 	_, err := helper.PutObjects(testBucket, writeObjects, strategy)
 	ds3Testing.AssertNilError(t, err)
@@ -416,7 +416,7 @@ func (builder *emptyReadChannelBuilder) OnDone(reader io.ReadCloser) {
 	reader.Close()
 }
 
-type emptyWriteCloser struct{}
+type emptyWriteCloser struct{}
 
 func (writer *emptyWriteCloser) Write(p []byte) (n int, err error) {
 	return len(p), nil
@@ -458,16 +458,16 @@ func TestBulkPutAndGetLotsOfFiles(t *testing.T) {
 	for i := 0; i < numObjects; i++ {
 		objName := fmt.Sprintf("file-%d.txt", i)
 		curWriteObj := helperModels.PutObject{
-			PutObject: ds3Models.Ds3PutObject{Name:objName, Size:0},
+			PutObject: ds3Models.Ds3PutObject{Name: objName, Size: 0},
 			ChannelBuilder: &emptyReadChannelBuilder{},
 		}
 		writeObjects = append(writeObjects, curWriteObj)
 	}
 
 	writeStrategy := helpers.WriteTransferStrategy{
 		BlobStrategy: &helpers.SimpleBlobStrategy{
-			Delay:time.Second * 30,
-			MaxConcurrentTransfers:10,
+			Delay: time.Second * 30,
+			MaxConcurrentTransfers: 10,
 		},
 		Options: helpers.WriteBulkJobOptions{MaxUploadSize: &helpers.MinUploadSize},
 		Listeners: newErrorOnErrorListenerStrategy(t),
@@ -492,8 +492,8 @@ func TestBulkPutAndGetLotsOfFiles(t *testing.T) {
 	readStrategy := helpers.ReadTransferStrategy{
 		Options: helpers.ReadBulkJobOptions{}, // use default job options
 		BlobStrategy: &helpers.SimpleBlobStrategy{
-			Delay:time.Second * 30,
-			MaxConcurrentTransfers:10,
+			Delay: time.Second * 30,
+			MaxConcurrentTransfers: 10,
 		},
 		Listeners: newErrorOnErrorListenerStrategy(t),
 	}
@@ -543,8 +543,8 @@ func TestRetryGettingBlobRange(t *testing.T) {
 			}()
 
 			// get a range of the blob
-			startRange := blob.Offset + 10 // retrieve subset of blob
-			endRange := blob.Length + blob.Offset - 1
+			startRange := blob.Offset + 10 // retrieve subset of blob
+			endRange := blob.Length + blob.Offset - 1
 			bytesWritten, err := helpers.RetryGettingBlobRange(client, testBucket, writeObj.PutObject.Name, blob.Offset, startRange, endRange, tempFile, client.Logger)
 			ds3Testing.AssertNilError(t, err)
 			ds3Testing.AssertInt64(t, "bytes written", endRange - startRange + 1, bytesWritten)
@@ -554,7 +554,7 @@ func TestRetryGettingBlobRange(t *testing.T) {
 			ds3Testing.AssertNilError(t, err)
 
 			tempFile.Seek(0, 0)
-			length := endRange - startRange
+			length := endRange - startRange
 			testutils.VerifyPartialFile(t, bigFilePath, length, startRange, tempFile)
 		}()
 		blobsChecked++
@@ -638,9 +638,9 @@ func TestRetryGetObjectsWhenSomeObjectsDoNotExist(t *testing.T) {
 	}
 
 	strategy := helpers.ReadTransferStrategy{
-		Options: helpers.ReadBulkJobOptions{}, // use default job options
+		Options: helpers.ReadBulkJobOptions{}, // use default job options
 		BlobStrategy: newTestBlobStrategy(),
-		Listeners: errListener,
+		Listeners: errListener,
 	}
 
 	file0, err := ioutil.TempFile(os.TempDir(), "goTest")
@@ -675,7 +675,7 @@ func TestRetryGetObjectsWhenSomeObjectsDoNotExist(t *testing.T) {
 
 	doesNotExistChannelBuilder1 := channels.NewWriteChannelBuilder(doesNotExist1.Name())
 	doesNotExistChannelBuilder2 := channels.NewWriteChannelBuilder(doesNotExist2.Name())
-	readObjects := []helperModels.GetObject{
+	readObjects := []helperModels.GetObject{
 		{Name: testutils.BookTitles[0], ChannelBuilder: channels.NewWriteChannelBuilder(file0.Name())},
 		{Name: testutils.BookTitles[1], ChannelBuilder: channels.NewWriteChannelBuilder(file1.Name())},
 		{Name: testutils.BookTitles[2], ChannelBuilder: channels.NewWriteChannelBuilder(file2.Name())},
@@ -708,3 +708,49 @@ func TestRetryGetObjectsWhenSomeObjectsDoNotExist(t *testing.T) {
 	ds3Testing.AssertBool(t, "has fatal error", true, doesNotExistChannelBuilder1.HasFatalError())
 	ds3Testing.AssertBool(t, "has fatal error", true, doesNotExistChannelBuilder2.HasFatalError())
 }
+
+func TestRetryGetObjectsWhenNoObjectExist(t *testing.T) {
+	defer testutils.DeleteBucketContents(client, testBucket)
+	err := testutils.PutTestBooks(client, testBucket)
+	ds3Testing.AssertNilError(t, err)
+
+	helper := helpers.NewHelpers(client)
+
+	perObjectErrCount := int64(0)
+	errListener := helpers.ListenerStrategy{
+		ErrorCallback: func(objectName string, err error) {
+			atomic.AddInt64(&perObjectErrCount, 1)
+		},
+	}
+
+	strategy := helpers.ReadTransferStrategy{
+		Options: helpers.ReadBulkJobOptions{}, // use default job options
+		BlobStrategy: newTestBlobStrategy(),
+		Listeners: errListener,
+	}
+
+	doesNotExist1, err := ioutil.TempFile(os.TempDir(), "goTest")
+	ds3Testing.AssertNilError(t, err)
+	ds3Testing.AssertNilError(t, doesNotExist1.Close())
+	defer os.Remove(doesNotExist1.Name())
+
+	doesNotExistChannelBuilder1 := channels.NewWriteChannelBuilder(doesNotExist1.Name())
+	readObjects := []helperModels.GetObject{
+		{Name: "doesNotExist1", ChannelBuilder: doesNotExistChannelBuilder1},
+	}
+
+	_, err = helper.GetObjects(testBucket, readObjects, strategy)
+	if err == nil {
+		t.Fatalf("expected error when creating GET job")
+	} else if !strings.Contains(err.Error(), "Could not find requested blobs for") {
+		t.Fatalf("expected '404 Could not find requested blobs' error but got %v", err)
+	}
+
+	ds3Testing.AssertInt64(t, "per-object error count", 1, perObjectErrCount)
+
+	doesNotExistStat, err := os.Stat(doesNotExist1.Name())
+	ds3Testing.AssertNilError(t, err)
+	ds3Testing.AssertInt64(t, "size of file", 0, doesNotExistStat.Size())
+
+	ds3Testing.AssertBool(t, "has fatal error", true, doesNotExistChannelBuilder1.HasFatalError())
+}