@@ -71,19 +71,24 @@ func toReadObjectMap(getObjects *[]helperModels.GetObject) map[string]helperMode
 
 // Processes all the blobs in a chunk that are ready for transfer from BP
 // Returns the number of blobs queued for process
-func (producer *getProducer) processChunk(curChunk *ds3Models.Objects, bucketName string, jobId string) int {
+func (producer *getProducer) processChunk(curChunk *ds3Models.Objects, bucketName string, jobId string) (int, error) {
    producer.Debugf("begin chunk processing %s", curChunk.ChunkId)
 
    processedCount := 0
    // transfer blobs that are ready, and queue those that are waiting for channel
    for _, curObj := range curChunk.Objects {
        producer.Debugf("queuing object in waiting to be processed %s offset=%d length=%d", *curObj.Name, curObj.Offset, curObj.Length)
        blob := helperModels.NewBlobDescription(*curObj.Name, curObj.Offset, curObj.Length)
-       if producer.queueBlobForTransfer(&blob, bucketName, jobId) {
+
+       blobQueued, err := producer.queueBlobForTransfer(&blob, bucketName, jobId)
+       if err != nil {
+           return 0, err
+       }
+       if blobQueued {
            processedCount++
        }
    }
-   return processedCount
+   return processedCount, nil
 }
 
 // Information required to perform a get operation of a blob with BP as data source and channelBuilder as destination
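The reworked processChunk follows a count-and-fail-fast pattern: every blob that queues successfully bumps the counter, while the first error aborts the whole chunk and is handed back to the caller. A minimal, self-contained sketch of that pattern (queueItem and the item names are hypothetical stand-ins, not the SDK's types):

```go
package main

import (
	"errors"
	"fmt"
)

// queueItem is a hypothetical stand-in for queueBlobForTransfer: it reports
// whether the item was queued and whether a fatal error occurred.
func queueItem(name string) (bool, error) {
	if name == "" {
		return false, errors.New("item has no name")
	}
	return name != "skip-me", nil
}

// processBatch mirrors the processChunk shape: count successes, return the
// first error immediately so the caller can abort the job.
func processBatch(names []string) (int, error) {
	processed := 0
	for _, name := range names {
		queued, err := queueItem(name)
		if err != nil {
			return 0, err
		}
		if queued {
			processed++
		}
	}
	return processed, nil
}

func main() {
	count, err := processBatch([]string{"a", "skip-me", "b"})
	fmt.Println(count, err) // 2 <nil>
}
```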
@@ -231,24 +236,37 @@ func writeRangeToDestination(channelBuilder helperModels.WriteChannelBuilder, bl
 // Attempts to transfer a single blob from the BP to the client. If the blob is not ready for transfer,
 // then it is added to the waiting to transfer queue
 // Returns whether or not the blob was queued for transfer
-func (producer *getProducer) queueBlobForTransfer(blob *helperModels.BlobDescription, bucketName string, jobId string) bool {
+func (producer *getProducer) queueBlobForTransfer(blob *helperModels.BlobDescription, bucketName string, jobId string) (bool, error) {
    if producer.processedBlobTracker.IsProcessed(*blob) {
-       return false // already been processed
+       return false, nil // already been processed
    }
 
-   curReadObj := producer.readObjectMap[blob.Name()]
+   curReadObj, ok := producer.readObjectMap[blob.Name()]
+   if !ok {
+       err := fmt.Errorf("failed to find object associated with blob in object map: %s offset=%d length=%d", blob.Name(), blob.Offset(), blob.Length())
+       producer.Errorf("unrecoverable error: %v", err)
+       producer.processedBlobTracker.MarkProcessed(*blob)
+       return false, err // fatal error occurred
+   }
+
+   if curReadObj.ChannelBuilder == nil {
+       err := fmt.Errorf("failed to transfer object, it does not have a channel builder: %s", curReadObj.Name)
+       producer.Errorf("unrecoverable error: %v", err)
+       producer.processedBlobTracker.MarkProcessed(*blob)
+       return false, err // fatal error occurred
+   }
 
    if curReadObj.ChannelBuilder.HasFatalError() {
        // a fatal error happened on a previous blob for this file, skip processing
        producer.Warningf("fatal error occurred while transferring previous blob on this file, skipping blob '%s' offset=%d length=%d", blob.Name(), blob.Offset(), blob.Length())
        producer.processedBlobTracker.MarkProcessed(*blob)
-       return false // not going to process
+       return false, nil // not going to process
    }
 
    if !curReadObj.ChannelBuilder.IsChannelAvailable(blob.Offset()) {
        producer.Debugf("channel is not currently available for getting blob '%s' offset=%d length=%d", blob.Name(), blob.Offset(), blob.Length())
        producer.deferredBlobQueue.Push(blob)
-       return false // not ready to be processed
+       return false, nil // not ready to be processed
    }
 
    producer.Debugf("channel is available for getting blob '%s' offset=%d length=%d", blob.Name(), blob.Offset(), blob.Length())
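The new guards rely on Go's comma-ok map lookup to tell a missing key apart from a zero-value entry, and then check the entry's channel builder for nil before dereferencing it. (The added fmt.Errorf calls assume this file already imports fmt; the import block is not shown in the diff.) A small, self-contained illustration of both guards using hypothetical types:

```go
package main

import "fmt"

type builder struct{ path string }

type readObject struct {
	Name           string
	ChannelBuilder *builder
}

func lookup(objects map[string]readObject, name string) (*builder, error) {
	obj, ok := objects[name]
	if !ok {
		// A plain objects[name] would silently yield a zero-value readObject here.
		return nil, fmt.Errorf("failed to find object associated with blob in object map: %s", name)
	}
	if obj.ChannelBuilder == nil {
		return nil, fmt.Errorf("failed to transfer object, it does not have a channel builder: %s", obj.Name)
	}
	return obj.ChannelBuilder, nil
}

func main() {
	objects := map[string]readObject{
		"ok.txt":         {Name: "ok.txt", ChannelBuilder: &builder{path: "/tmp/ok.txt"}},
		"no-builder.txt": {Name: "no-builder.txt"},
	}
	for _, name := range []string{"ok.txt", "no-builder.txt", "missing.txt"} {
		b, err := lookup(objects, name)
		fmt.Println(name, b, err)
	}
}
```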
@@ -261,7 +279,7 @@ func (producer *getProducer) queueBlobForTransfer(blob *helperModels.BlobDescrip
        jobId: jobId,
    }
 
-   var transfer TransferOperation = producer.transferOperationBuilder(objInfo)
+   transfer := producer.transferOperationBuilder(objInfo)
 
    // Increment wait group, and enqueue transfer operation
    producer.waitGroup.Add(1)
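The hunk above also swaps an explicitly typed var declaration for a short variable declaration; the compiler infers the TransferOperation type from transferOperationBuilder's return value, so the annotation was redundant. A tiny illustration of the idiom (the operation type and builder here are hypothetical):

```go
package main

import "fmt"

// operation is a hypothetical stand-in for TransferOperation.
type operation func() error

func buildOperation(name string) operation {
	return func() error {
		fmt.Println("transferring", name)
		return nil
	}
}

func main() {
	// Equivalent to: var op operation = buildOperation("blob-0")
	op := buildOperation("blob-0")
	_ = op()
}
```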
@@ -270,31 +288,35 @@ func (producer *getProducer) queueBlobForTransfer(blob *helperModels.BlobDescrip
    // Mark blob as processed
    producer.processedBlobTracker.MarkProcessed(*blob)
 
-   return true
+   return true, nil
 }
 
 // Attempts to process all blobs whose channels were not available for transfer.
 // Blobs whose channels are still not available are placed back on the queue.
 // Returns the number of blobs queued for processing.
-func (producer *getProducer) processWaitingBlobs(bucketName string, jobId string) int {
+func (producer *getProducer) processWaitingBlobs(bucketName string, jobId string) (int, error) {
    processedCount := 0
 
    // attempt to process all blobs in waiting to be transferred
    waitingBlobs := producer.deferredBlobQueue.Size()
    for i := 0; i < waitingBlobs; i++ {
        //attempt transfer
        curBlob, err := producer.deferredBlobQueue.Pop()
-       producer.Debugf("attempting to process '%s' offset=%d length=%d", curBlob.Name(), curBlob.Offset(), curBlob.Length())
        if err != nil {
            //should not be possible to get here
-           producer.Errorf("failure during blob transfer '%s' at offset %d: %s", curBlob.Name(), curBlob.Offset(), err.Error())
+           producer.Errorf(err.Error())
            break
        }
-       if producer.queueBlobForTransfer(curBlob, bucketName, jobId) {
+       producer.Debugf("attempting to process '%s' offset=%d length=%d", curBlob.Name(), curBlob.Offset(), curBlob.Length())
+       blobQueued, err := producer.queueBlobForTransfer(curBlob, bucketName, jobId)
+       if err != nil {
+           return 0, err
+       }
+       if blobQueued {
            processedCount++
        }
    }
-   return processedCount
+   return processedCount, nil
 }
 
 // This initiates the production of the transfer operations which will be consumed by a consumer running in a separate go routine.
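processWaitingBlobs drains the deferred queue by snapshotting its size before the loop, so items that get pushed back during the pass (because their channel is still unavailable) are not retried in the same pass. A minimal sketch of that drain pattern over a hypothetical slice-backed queue:

```go
package main

import "fmt"

// queue is a hypothetical slice-backed stand-in for deferredBlobQueue.
type queue struct{ items []string }

func (q *queue) Size() int        { return len(q.items) }
func (q *queue) Push(item string) { q.items = append(q.items, item) }
func (q *queue) Pop() (string, bool) {
	if len(q.items) == 0 {
		return "", false
	}
	item := q.items[0]
	q.items = q.items[1:]
	return item, true
}

// drain attempts each deferred item exactly once per pass; items that are
// still not ready are pushed back for a later pass.
func drain(q *queue, ready func(string) bool) int {
	processed := 0
	waiting := q.Size() // snapshot so re-pushed items are not retried this pass
	for i := 0; i < waiting; i++ {
		item, ok := q.Pop()
		if !ok {
			break // should not happen: we only pop what we counted
		}
		if !ready(item) {
			q.Push(item)
			continue
		}
		processed++
	}
	return processed
}

func main() {
	q := &queue{items: []string{"a", "b", "c"}}
	count := drain(q, func(item string) bool { return item != "b" })
	fmt.Println(count, q.Size()) // 2 1
}
```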
@@ -304,7 +326,7 @@ func (producer *getProducer) run() error {
    defer close(*producer.queue)
 
    // determine number of blobs to be processed
-   var totalBlobCount int64 = producer.totalBlobCount()
+   var totalBlobCount = producer.totalBlobCount()
    producer.Debugf("job status totalBlobs=%d processedBlobs=%d", totalBlobCount, producer.processedBlobTracker.NumberOfProcessedBlobs())
 
    // process all chunks and make sure all blobs are queued for transfer
@@ -332,7 +354,10 @@ func (producer *getProducer) hasMoreToProcess(totalBlobCount int64) bool {
 // Returns the number of blobs that have been queued for transfer
 func (producer *getProducer) queueBlobsReadyForTransfer(totalBlobCount int64) (int, error) {
    // Attempt to transfer waiting blobs
-   processedCount := producer.processWaitingBlobs(*producer.JobMasterObjectList.BucketName, producer.JobMasterObjectList.JobId)
+   processedCount, err := producer.processWaitingBlobs(*producer.JobMasterObjectList.BucketName, producer.JobMasterObjectList.JobId)
+   if err != nil {
+       return 0, err
+   }
 
    // Check if we need to query the BP for allocated blobs, or if we already know everything is allocated.
    if int64(producer.deferredBlobQueue.Size()) + producer.processedBlobTracker.NumberOfProcessedBlobs() >= totalBlobCount {
@@ -356,7 +381,11 @@ func (producer *getProducer) queueBlobsReadyForTransfer(totalBlobCount int64) (i
        // Loop through all the chunks that are available for processing, and send
        // the files that are contained within them.
        for _, curChunk := range chunksReadyResponse.MasterObjectList.Objects {
-           processedCount += producer.processChunk(&curChunk, *chunksReadyResponse.MasterObjectList.BucketName, chunksReadyResponse.MasterObjectList.JobId)
+           justProcessedCount, err := producer.processChunk(&curChunk, *chunksReadyResponse.MasterObjectList.BucketName, chunksReadyResponse.MasterObjectList.JobId)
+           if err != nil {
+               return 0, err
+           }
+           processedCount += justProcessedCount
        }
    }
    return processedCount, nil
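With queueBlobsReadyForTransfer now failing fast, an error raised deep inside queueBlobForTransfer can surface all the way out of the producer instead of blobs being silently skipped. The run method's loop body is not shown in this diff, so the sketch below is only one plausible shape of the propagation, built on hypothetical stand-in types rather than the SDK's actual producer:

```go
package main

import (
	"errors"
	"fmt"
)

// blobQueuer is a hypothetical abstraction over the producer's
// hasMoreToProcess / queueBlobsReadyForTransfer pair.
type blobQueuer interface {
	hasMoreToProcess(totalBlobCount int64) bool
	queueBlobsReadyForTransfer(totalBlobCount int64) (int, error)
}

// runLoop shows how a run-style loop aborts on the first error.
func runLoop(p blobQueuer, totalBlobCount int64) error {
	for p.hasMoreToProcess(totalBlobCount) {
		if _, err := p.queueBlobsReadyForTransfer(totalBlobCount); err != nil {
			return err
		}
	}
	return nil
}

// fakeProducer simulates two successful passes followed by a fatal error.
type fakeProducer struct{ passes int }

func (f *fakeProducer) hasMoreToProcess(total int64) bool { return int64(f.passes) < total }
func (f *fakeProducer) queueBlobsReadyForTransfer(total int64) (int, error) {
	f.passes++
	if f.passes == 3 {
		return 0, errors.New("failed to find object associated with blob in object map")
	}
	return 1, nil
}

func main() {
	err := runLoop(&fakeProducer{}, 5)
	fmt.Println(err)
}
```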