@@ -367,16 +367,18 @@ async fn filter_tar_async(
 
         let r = filter_tar(&mut src, dest, &config, &repo_tmpdir);
 
-        // We need to make sure to flush out the decompressor here,
-        // otherwise it's possible that we finish processing the tar
-        // stream but leave data in the pipe. For example,
-        // zstd:chunked layers will have metadata/skippable frames at
-        // the end of the stream. That data isn't relevant to the tar
-        // stream, but if we don't read it here then on the skopeo
-        // proxy we'll block trying to write the end of the stream.
-        // That in turn will block our client end trying to call
-        // FinishPipe, and we end up deadlocking ourselves through
-        // skopeo.
+        // We need to make sure to flush out the decompressor and/or
+        // tar stream here. For tar, we might not read through the
+        // entire stream, because the archive has zero-block markers
+        // at the end; or possibly because the final entry is filtered
+        // in filter_tar so we don't advance to read the data. For the
+        // decompressor, zstd:chunked layers will have
+        // metadata/skippable frames at the end of the stream. That
+        // data isn't relevant to the tar stream, but if we don't read
+        // it here then on the skopeo proxy we'll block trying to
+        // write the end of the stream. That in turn will block our
+        // client end trying to call FinishPipe, and we end up
+        // deadlocking ourselves through skopeo.
         //
         // https://github.com/bootc-dev/bootc/issues/1204
         let mut sink = std::io::sink();
@@ -385,7 +387,6 @@ async fn filter_tar_async(
             tracing::debug!("Read extra {n} bytes at end of decompressor stream");
         }
 
-        // Pass ownership of the input stream back to the caller - see below.
         Ok(r)
     });
     let copier = tokio::io::copy(&mut rx_buf, &mut dest);
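
The comment above describes a drain-to-EOF pattern: once the tar consumer has stopped reading, any trailing bytes still in the pipe (tar end-of-archive zero blocks, zstd skippable frames) must be read and discarded so the writing side can finish and close its end. Below is a minimal, self-contained sketch of that pattern, not the bootc/ostree-ext implementation; the function name drain_remaining and the io::repeat stand-in source are illustrative assumptions.

use std::io::{self, Read};

// Sketch of the drain pattern (assumed helper, not part of bootc):
// read the source to EOF into a sink so the peer writing trailer data
// is not left blocked on a full pipe.
fn drain_remaining(mut src: impl Read) -> io::Result<u64> {
    // std::io::sink() discards everything; io::copy reads src to EOF.
    let n = io::copy(&mut src, &mut io::sink())?;
    if n > 0 {
        eprintln!("Read extra {n} bytes at end of stream");
    }
    Ok(n)
}

fn main() -> io::Result<()> {
    // Stand-in for a decompressor/tar reader that stopped before EOF:
    // pretend 1024 trailer bytes are still buffered in the stream.
    let trailer = io::repeat(0u8).take(1024);
    let n = drain_remaining(trailer)?;
    assert_eq!(n, 1024);
    Ok(())
}

In the real code path the source is the decompressor wrapped around the skopeo proxy pipe, so draining it is what lets the proxy complete its write and lets the client's FinishPipe call return instead of deadlocking.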