Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 4 additions & 3 deletions common/libnetwork/cni/network.go
Original file line number Diff line number Diff line change
Expand Up @@ -327,9 +327,10 @@ func (n *cniNetwork) NetworkInfo() types.NetworkInfo {
}

info := types.NetworkInfo{
Backend: types.CNI,
Package: packageVersion,
Path: path,
Backend: types.CNI,
Package: packageVersion,
Path: path,
DefaultNetwork: n.defaultNetwork,
}

dnsPath := filepath.Join(path, "dnsname")
Expand Down
9 changes: 5 additions & 4 deletions common/libnetwork/netavark/network.go
Original file line number Diff line number Diff line change
Expand Up @@ -369,10 +369,11 @@ func (n *netavarkNetwork) NetworkInfo() types.NetworkInfo {
logrus.Infof("Failed to get the netavark version: %v", err)
}
info := types.NetworkInfo{
Backend: types.Netavark,
Version: programVersion,
Package: packageVersion,
Path: path,
Backend: types.Netavark,
Version: programVersion,
Package: packageVersion,
Path: path,
DefaultNetwork: n.defaultNetwork,
}

dnsPath := n.aardvarkBinary
Expand Down
11 changes: 6 additions & 5 deletions common/libnetwork/types/network.go
Original file line number Diff line number Diff line change
Expand Up @@ -97,11 +97,12 @@ type NetworkUpdateOptions struct {

// NetworkInfo contains the network information.
type NetworkInfo struct {
Backend NetworkBackend `json:"backend"`
Version string `json:"version,omitempty"`
Package string `json:"package,omitempty"`
Path string `json:"path,omitempty"`
DNS DNSNetworkInfo `json:"dns,omitempty"`
Backend NetworkBackend `json:"backend"`
Version string `json:"version,omitempty"`
Package string `json:"package,omitempty"`
Path string `json:"path,omitempty"`
DNS DNSNetworkInfo `json:"dns,omitempty"`
DefaultNetwork string `json:"default_network,omitempty"`
}

// DNSNetworkInfo contains the DNS information.
Expand Down
11 changes: 4 additions & 7 deletions go.work.sum
Original file line number Diff line number Diff line change
@@ -1,22 +1,20 @@
github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/clipperhouse/uax29/v2 v2.2.0/go.mod h1:EFJ2TJMRUaplDxHKj1qAEhCtQPW2tJSwu5BF98AuoVM=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8=
github.com/cpuguy83/go-md2man/v2 v2.0.5/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/cyphar/filepath-securejoin v0.5.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGLDGQL7h7bg04C/+u9jI=
github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw=
github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8=
github.com/magefile/mage v1.14.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
github.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
github.com/sirupsen/logrus v1.9.4-0.20230606125235-dd1b4c2e81af/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
github.com/mistifyio/go-zfs v2.1.1+incompatible h1:gAMO1HM9xBRONLHHYnu5iFsOJUiJdNZo6oqSENd4eW8=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tchap/go-patricia v2.3.0+incompatible h1:GkY4dP3cEfEASBPPkWd+AmjYxhmDkqO9/zg7R0lSQRs=
github.com/urfave/cli v1.22.16/go.mod h1:EeJR6BKodywf4zciqrdw6hpCPk68JO9z5LazXZMn5Po=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.36.0/go.mod h1:90PoxvaEB5n6AOdZvi+yWJQoE95U8Dhhw2bSyRqnTD0=
go.opentelemetry.io/proto/otlp v1.6.0/go.mod h1:cicgGehlFuNdgZkcALOCh3VE6K/u2tAjzlRhDwmVpZc=
Expand All @@ -26,7 +24,6 @@ golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
Expand Down
17 changes: 17 additions & 0 deletions image/copy/compression.go
Original file line number Diff line number Diff line change
Expand Up @@ -120,6 +120,7 @@ func (ic *imageCopier) blobPipelineCompressionStep(stream *sourceStream, canModi
if canModifyBlob && layerCompressionChangeSupported {
for _, fn := range []func(*sourceStream, bpDetectCompressionStepData) (*bpCompressionStepData, error){
ic.bpcPreserveEncrypted,
ic.bpcPreserveNoCompress,
ic.bpcCompressUncompressed,
ic.bpcRecompressCompressed,
ic.bpcDecompressCompressed,
Expand Down Expand Up @@ -153,6 +154,22 @@ func (ic *imageCopier) bpcPreserveEncrypted(stream *sourceStream, _ bpDetectComp
return nil, nil
}

// bpcPreserveNoCompress checks if the input is a no-compress type (like tar-diff), and returns a *bpCompressionStepData if so.
func (ic *imageCopier) bpcPreserveNoCompress(stream *sourceStream, _ bpDetectCompressionStepData) (*bpCompressionStepData, error) {
	// Anything other than a no-compress media type is not handled by this step.
	if !manifest.IsNoCompressType(stream.info.MediaType) {
		return nil, nil
	}
	logrus.Debugf("Using original blob without modification for no-compress type")
	// Pass the blob through untouched; its compression state is opaque to us.
	return &bpCompressionStepData{
		operation:                             bpcOpPreserveOpaque,
		uploadedOperation:                     types.PreserveOriginal,
		uploadedAlgorithm:                     nil,
		srcCompressorBaseVariantName:          internalblobinfocache.UnknownCompression,
		uploadedCompressorBaseVariantName:     internalblobinfocache.UnknownCompression,
		uploadedCompressorSpecificVariantName: internalblobinfocache.UnknownCompression,
	}, nil
}

// bpcCompressUncompressed checks if we should be compressing an uncompressed input, and returns a *bpCompressionStepData if so.
func (ic *imageCopier) bpcCompressUncompressed(stream *sourceStream, detected bpDetectCompressionStepData) (*bpCompressionStepData, error) {
if ic.c.dest.DesiredLayerCompression() == types.Compress && !detected.isCompressed {
Expand Down
165 changes: 158 additions & 7 deletions image/copy/single.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,9 +10,11 @@ import (
"maps"
"reflect"
"slices"
"sort"
"strings"
"sync"

tarpatch "github.com/containers/tar-diff/pkg/tar-patch"
digest "github.com/opencontainers/go-digest"
imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/sirupsen/logrus"
Expand All @@ -30,6 +32,20 @@ import (
chunkedToc "go.podman.io/storage/pkg/chunked/toc"
)

// formatSize formats a byte count as a human-readable string using
// binary-scaled units (B, KB, MB, GB, TB, PB, EB; 1024-based steps).
func formatSize(size int64) string {
	const unit = 1024
	if size < unit {
		return fmt.Sprintf("%d B", size)
	}
	// Find the largest unit such that the scaled value is < 1024.
	div, exp := int64(unit), 0
	for n := size / unit; n >= unit; n /= unit {
		div *= unit
		exp++
	}
	// "KMGTPE" covers every exponent reachable from an int64 (max ~8 EiB).
	// The previous "KMGT" table panicked with an index out of range for
	// any size >= 1 PiB, where exp reaches 4.
	return fmt.Sprintf("%.1f %cB", float64(size)/float64(div), "KMGTPE"[exp])
}

// imageCopier tracks state specific to a single image (possibly an item of a manifest list)
type imageCopier struct {
c *copier
Expand Down Expand Up @@ -448,6 +464,11 @@ func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algor
srcInfosUpdated = true
}

deltaLayers, err := types.ImageDeltaLayers(ic.src, ctx)
if err != nil {
return nil, err
}

type copyLayerData struct {
destInfo types.BlobInfo
diffID digest.Digest
Expand All @@ -466,7 +487,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algor
copyGroup := sync.WaitGroup{}

data := make([]copyLayerData, len(srcInfos))
copyLayerHelper := func(index int, srcLayer types.BlobInfo, toEncrypt bool, pool *mpb.Progress, srcRef reference.Named) {
copyLayerHelper := func(index int, srcLayer types.BlobInfo, toEncrypt bool, pool *mpb.Progress, srcRef reference.Named, deltaLayers []types.BlobInfo) {
defer ic.c.concurrentBlobCopiesSemaphore.Release(1)
defer copyGroup.Done()
cld := copyLayerData{}
Expand All @@ -481,7 +502,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algor
logrus.Debugf("Skipping foreign layer %q copy to %s", cld.destInfo.Digest, ic.c.dest.Reference().Transport().Name())
}
} else {
cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, srcLayer, toEncrypt, pool, index, srcRef, manifestLayerInfos[index].EmptyLayer)
cld.destInfo, cld.diffID, cld.err = ic.copyLayer(ctx, index, srcLayer, toEncrypt, pool, srcRef, manifestLayerInfos[index].EmptyLayer, deltaLayers)
}
data[index] = cld
}
Expand Down Expand Up @@ -521,7 +542,7 @@ func (ic *imageCopier) copyLayers(ctx context.Context) ([]compressiontypes.Algor
return fmt.Errorf("copying layer: %w", err)
}
copyGroup.Add(1)
go copyLayerHelper(i, srcLayer, layersToEncrypt.Contains(i), progressPool, ic.c.rawSource.Reference().DockerReference())
go copyLayerHelper(i, srcLayer, layersToEncrypt.Contains(i), progressPool, ic.c.rawSource.Reference().DockerReference(), deltaLayers)
}

// A call to copyGroup.Wait() is done at this point by the defer above.
Expand Down Expand Up @@ -690,10 +711,85 @@ func compressionEditsFromBlobInfo(srcInfo types.BlobInfo) (types.LayerCompressio
}
}

// getMatchingDeltaLayers gets all the deltas that apply to this layer
func (ic *imageCopier) getMatchingDeltaLayers(ctx context.Context, srcIndex int, deltaLayers []types.BlobInfo) (digest.Digest, []*types.BlobInfo) {
	if deltaLayers == nil {
		return "", nil
	}
	// Best-effort: without a usable OCI config (or enough DiffIDs) no delta can match.
	config, _ := ic.src.OCIConfig(ctx)
	if config == nil || config.RootFS.DiffIDs == nil || len(config.RootFS.DiffIDs) <= srcIndex {
		return "", nil
	}

	wantDiffID := config.RootFS.DiffIDs[srcIndex]
	wanted := wantDiffID.String()

	// Collect every delta whose "delta.to" annotation targets this layer's DiffID.
	var matches []*types.BlobInfo
	for i := range deltaLayers {
		candidate := &deltaLayers[i]
		if candidate.Annotations["io.github.containers.delta.to"] == wanted {
			matches = append(matches, candidate)
		}
	}

	return wantDiffID, matches
}

// resolveDeltaLayer looks at which of the matching delta froms have locally available data and picks the best one
func (ic *imageCopier) resolveDeltaLayer(ctx context.Context, matchingDeltas []*types.BlobInfo) (io.ReadCloser, tarpatch.DataSource, types.BlobInfo, error) {
	// Order the candidates by ascending size so the smallest usable delta wins.
	sort.Slice(matchingDeltas, func(a, b int) bool {
		return matchingDeltas[a].Size < matchingDeltas[b].Size
	})

	for _, candidate := range matchingDeltas {
		fromDigest, err := digest.Parse(candidate.Annotations["io.github.containers.delta.from"])
		if err != nil {
			// Silently ignore a "from" annotation in a format we don't understand.
			continue
		}

		dataSource, err := types.ImageDestinationGetLayerDeltaData(ic.c.dest, ctx, fromDigest)
		if err != nil {
			return nil, nil, types.BlobInfo{}, err // Internal error
		}
		if dataSource == nil {
			// The "from" layer is not available locally; try the next candidate.
			continue
		}

		logrus.Debugf("Using delta %v for DiffID %v", candidate.Digest, fromDigest)

		deltaStream, _, err := ic.c.rawSource.GetBlob(ctx, *candidate, ic.c.blobInfoCache)
		if err != nil {
			return nil, nil, types.BlobInfo{}, fmt.Errorf("reading delta blob %s: %w", candidate.Digest, err)
		}
		return deltaStream, dataSource, *candidate, nil
	}

	// No usable delta was found; the caller falls back to a regular copy.
	return nil, nil, types.BlobInfo{}, nil
}

// canUseDeltas checks if deltas can be used for this layer
func (ic *imageCopier) canUseDeltas(srcInfo types.BlobInfo) (bool, string) {
	// Deltas rewrite the manifest to refer to the uncompressed digest, so we must be able to substitute blobs
	if !ic.canSubstituteBlobs {
		return false, ""
	}

	// Map each supported source media type to the uncompressed media type the
	// delta-applied result will carry.
	switch srcInfo.MediaType {
	case manifest.DockerV2Schema2LayerMediaType,
		manifest.DockerV2SchemaLayerMediaTypeUncompressed:
		return true, manifest.DockerV2SchemaLayerMediaTypeUncompressed
	case imgspecv1.MediaTypeImageLayer,
		imgspecv1.MediaTypeImageLayerGzip,
		imgspecv1.MediaTypeImageLayerZstd:
		return true, imgspecv1.MediaTypeImageLayer
	default:
		return false, ""
	}
}

// copyLayer copies a layer with srcInfo (with known Digest and Annotations and possibly known Size) in src to dest, perhaps (de/re/)compressing it,
// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
// srcRef can be used as an additional hint to the destination during checking whether a layer can be reused but srcRef can be nil.
func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, toEncrypt bool, pool *mpb.Progress, layerIndex int, srcRef reference.Named, emptyLayer bool) (types.BlobInfo, digest.Digest, error) {
func (ic *imageCopier) copyLayer(ctx context.Context, srcIndex int, srcInfo types.BlobInfo, toEncrypt bool, pool *mpb.Progress, srcRef reference.Named, emptyLayer bool, deltaLayers []types.BlobInfo) (types.BlobInfo, digest.Digest, error) {
// If the srcInfo doesn't contain compression information, try to compute it from the
// MediaType, which was either read from a manifest by way of LayerInfos() or constructed
// by LayerInfosForCopy(), if it was supplied at all. If we succeed in copying the blob,
Expand All @@ -712,6 +808,59 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to

ic.c.printCopyInfo("blob", srcInfo)

// First look for a delta that matches this layer and substitute the result of that
if ok, deltaResultMediaType := ic.canUseDeltas(srcInfo); ok {
// Get deltas going TO this layer
deltaDiffID, matchingDeltas := ic.getMatchingDeltaLayers(ctx, srcIndex, deltaLayers)
// Get best possible FROM delta
deltaStream, deltaDataSource, matchingDelta, err := ic.resolveDeltaLayer(ctx, matchingDeltas)
if err != nil {
return types.BlobInfo{}, "", err
}
if deltaStream != nil {
logrus.Debugf("Applying delta for layer %s (delta size: %.1f MB)", deltaDiffID, float64(matchingDelta.Size)/(1024.0*1024.0))
bar, err := ic.c.createProgressBar(pool, false, matchingDelta, "delta", "done")
if err != nil {
return types.BlobInfo{}, "", err
}

wrappedDeltaStream := bar.ProxyReader(deltaStream)

// Convert deltaStream to uncompressed tar layer stream
pr, pw := io.Pipe()
go func() {
if err := tarpatch.Apply(wrappedDeltaStream, deltaDataSource, pw); err != nil {
// We will notice this error when failing to verify the digest, so leave it be
logrus.Infof("Failed to apply layer delta: %v", err)
}
deltaDataSource.Close()
deltaStream.Close()
wrappedDeltaStream.Close()
pw.Close()
}()
defer pr.Close()

// Copy uncompressed tar layer to destination, verifying the diffID
blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, pr, types.BlobInfo{Digest: deltaDiffID, Size: -1, MediaType: deltaResultMediaType, Annotations: srcInfo.Annotations}, true, toEncrypt, bar, srcIndex, emptyLayer)
if err != nil {
return types.BlobInfo{}, "", err
}

// Wait for diffID verification
diffIDResult := <-diffIDChan
if diffIDResult.err != nil {
return types.BlobInfo{}, "", diffIDResult.err
}

bar.SetTotal(matchingDelta.Size, true)

// Record the fact that this blob is uncompressed
ic.c.blobInfoCache.RecordDigestUncompressedPair(diffIDResult.digest, diffIDResult.digest)

return blobInfo, diffIDResult.digest, nil
}
}

diffIDIsNeeded := false
var cachedDiffID digest.Digest = ""
if ic.diffIDsAreNeeded {
Expand Down Expand Up @@ -751,7 +900,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
Cache: ic.c.blobInfoCache,
CanSubstitute: canSubstitute,
EmptyLayer: emptyLayer,
LayerIndex: &layerIndex,
LayerIndex: &srcIndex,
SrcRef: srcRef,
PossibleManifestFormats: append([]string{ic.manifestConversionPlan.preferredMIMEType}, ic.manifestConversionPlan.otherMIMETypeCandidates...),
RequiredCompression: requiredCompression,
Expand Down Expand Up @@ -813,7 +962,7 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
uploadedBlob, err := ic.c.dest.PutBlobPartial(ctx, &proxy, srcInfo, private.PutBlobPartialOptions{
Cache: ic.c.blobInfoCache,
EmptyLayer: emptyLayer,
LayerIndex: layerIndex,
LayerIndex: srcIndex,
})
if err == nil {
if srcInfo.Size != -1 {
Expand Down Expand Up @@ -856,7 +1005,9 @@ func (ic *imageCopier) copyLayer(ctx context.Context, srcInfo types.BlobInfo, to
}
defer srcStream.Close()

blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar, layerIndex, emptyLayer)
logrus.Debugf("Downloading layer %s (blob size: %s)", srcInfo.Digest, formatSize(srcBlobSize))

blobInfo, diffIDChan, err := ic.copyLayerFromStream(ctx, srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize, MediaType: srcInfo.MediaType, Annotations: srcInfo.Annotations}, diffIDIsNeeded, toEncrypt, bar, srcIndex, emptyLayer)
if err != nil {
return types.BlobInfo{}, "", err
}
Expand Down
Loading
Loading