diff --git a/go.mod b/go.mod
index f8ce4dd98..2215b356b 100644
--- a/go.mod
+++ b/go.mod
@@ -3,7 +3,7 @@ module github.com/shipwright-io/cli
 go 1.24.0

 require (
-	github.com/google/go-containerregistry v0.20.6
+	github.com/google/go-containerregistry v0.20.7
 	github.com/onsi/gomega v1.38.2
 	github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06
 	github.com/schollz/progressbar/v3 v3.18.0
@@ -33,10 +33,10 @@ require (
 	github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
 	github.com/chai2010/gettext-go v1.0.3 // indirect
-	github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect
+	github.com/containerd/stargz-snapshotter/estargz v0.18.1 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.7 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
-	github.com/docker/cli v29.0.2+incompatible // indirect
+	github.com/docker/cli v29.0.3+incompatible // indirect
 	github.com/docker/distribution v2.8.3+incompatible // indirect
 	github.com/docker/docker-credential-helpers v0.9.3 // indirect
 	github.com/emicklei/go-restful/v3 v3.12.1 // indirect
@@ -69,7 +69,7 @@ require (
 	github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
-	github.com/klauspost/compress v1.18.0 // indirect
+	github.com/klauspost/compress v1.18.1 // indirect
 	github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
 	github.com/mailru/easyjson v0.9.0 // indirect
 	github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect
@@ -96,7 +96,7 @@ require (
 	github.com/sirupsen/logrus v1.9.3 // indirect
 	github.com/stoewer/go-strcase v1.3.0 // indirect
 	github.com/tektoncd/pipeline v1.6.0 // indirect
-	github.com/vbatts/tar-split v0.12.1 // indirect
+	github.com/vbatts/tar-split v0.12.2 // indirect
 	github.com/x448/float16 v0.8.4 // indirect
 	github.com/xlab/treeprint v1.2.0 // indirect
 	go.opencensus.io v0.24.0 // indirect
@@ -106,7 +106,7 @@ require (
 	go.yaml.in/yaml/v3 v3.0.4 // indirect
 	golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac // indirect
 	golang.org/x/net v0.47.0 // indirect
-	golang.org/x/oauth2 v0.30.0 // indirect
+	golang.org/x/oauth2 v0.33.0 // indirect
 	golang.org/x/sync v0.18.0 // indirect
 	golang.org/x/sys v0.38.0 // indirect
 	golang.org/x/term v0.37.0 // indirect
diff --git a/go.sum b/go.sum
index 4e9951296..93a48aa76 100644
--- a/go.sum
+++ b/go.sum
@@ -82,8 +82,8 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk
 github.com/cloudevents/sdk-go/v2 v2.16.1 h1:G91iUdqvl88BZ1GYYr9vScTj5zzXSyEuqbfE63gbu9Q=
 github.com/cloudevents/sdk-go/v2 v2.16.1/go.mod h1:v/kVOaWjNfbvc6tkhhlkhvLapj8Aa8kvXiH5GiOHCKI=
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
-github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8=
-github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU=
+github.com/containerd/stargz-snapshotter/estargz v0.18.1 h1:cy2/lpgBXDA3cDKSyEfNOFMA/c10O1axL69EU7iirO8=
+github.com/containerd/stargz-snapshotter/estargz v0.18.1/go.mod h1:ALIEqa7B6oVDsrF37GkGN20SuvG/pIMm7FwP7ZmRb0Q=
 github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
 github.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=
 github.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
@@ -93,8 +93,8 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/docker/cli v29.0.2+incompatible h1:iLuKy2GWOSLXGp8feLYBJQVDv7m/8xoofz6lPq41x6A=
-github.com/docker/cli v29.0.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v29.0.3+incompatible h1:8J+PZIcF2xLd6h5sHPsp5pvvJA+Sr2wGQxHkRl53a1E=
+github.com/docker/cli v29.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
 github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/docker-credential-helpers v0.9.3 h1:gAm/VtF9wgqJMoxzT3Gj5p4AqIjCBS4wrsOh9yRqcz8=
@@ -208,8 +208,8 @@ github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
 github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
 github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
-github.com/google/go-containerregistry v0.20.6 h1:cvWX87UxxLgaH76b4hIvya6Dzz9qHB31qAwjAohdSTU=
-github.com/google/go-containerregistry v0.20.6/go.mod h1:T0x8MuoAoKX/873bkeSfLD2FAkwCDf9/HZgsFJ02E2Y=
+github.com/google/go-containerregistry v0.20.7 h1:24VGNpS0IwrOZ2ms2P1QE3Xa5X9p4phx0aUgzYzHW6I=
+github.com/google/go-containerregistry v0.20.7/go.mod h1:Lx5LCZQjLH1QBaMPeGwsME9biPeo1lPx6lbGj/UmzgM=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
@@ -262,8 +262,8 @@ github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dv
 github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
-github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
+github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co=
+github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
@@ -410,8 +410,8 @@ github.com/tektoncd/pipeline v1.6.0 h1:A+D+jzOVl2QNl/yiNT7csVgBUy2wpz6K6+/D4q5lf
 github.com/tektoncd/pipeline v1.6.0/go.mod h1:5SNoYgRYPQopkv7ApVq5GO3JqPk2AjV+VMMjwBsbJOg=
 github.com/texttheater/golang-levenshtein/levenshtein v0.0.0-20200805054039-cae8b0eaed6c h1:HelZ2kAFadG0La9d+4htN4HzQ68Bm2iM9qKMSMES6xg=
 github.com/texttheater/golang-levenshtein/levenshtein v0.0.0-20200805054039-cae8b0eaed6c/go.mod h1:JlzghshsemAMDGZLytTFY8C1JQxQPhnatWqNwUXjggo=
-github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo=
-github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA=
+github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4=
+github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA=
 github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
 github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
 github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ=
@@ -491,8 +491,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
 golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
-golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
+golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
+golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -536,8 +536,8 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr
 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
-golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
-golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
+golang.org/x/oauth2 v0.33.0 h1:4Q+qn+E5z8gPRJfmRy7C2gGG3T4jIprK6aSYgTXGRpo=
+golang.org/x/oauth2 v0.33.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -656,8 +656,8 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc
 golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
 golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
-golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
+golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
+golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
index 6aba0ef1f..a9e1b72ba 100644
--- a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go
@@ -35,6 +35,7 @@ import (
 	"runtime"
 	"strings"
 	"sync"
+	"sync/atomic"

 	"github.com/containerd/stargz-snapshotter/estargz/errorutil"
 	"github.com/klauspost/compress/zstd"
@@ -42,6 +43,8 @@ import (
 	"golang.org/x/sync/errgroup"
 )

+type GzipHelperFunc func(io.Reader) (io.ReadCloser, error)
+
 type options struct {
 	chunkSize        int
 	compressionLevel int
@@ -50,6 +53,7 @@ type options struct {
 	compression    Compression
 	ctx            context.Context
 	minChunkSize   int
+	gzipHelperFunc GzipHelperFunc
 }

 type Option func(o *options) error
@@ -127,11 +131,25 @@ func WithMinChunkSize(minChunkSize int) Option {
 	}
 }

+// WithGzipHelperFunc option specifies a custom function to decompress gzip-compressed layers.
+// When a gzip-compressed layer is detected, this function will be used instead of the
+// Go standard library gzip decompression for better performance.
+// The function should take an io.Reader as input and return an io.ReadCloser.
+// If nil, the Go standard library gzip.NewReader will be used.
+func WithGzipHelperFunc(gzipHelperFunc GzipHelperFunc) Option {
+	return func(o *options) error {
+		o.gzipHelperFunc = gzipHelperFunc
+		return nil
+	}
+}
+
 // Blob is an eStargz blob.
 type Blob struct {
 	io.ReadCloser
-	diffID    digest.Digester
-	tocDigest digest.Digest
+	diffID           digest.Digester
+	tocDigest        digest.Digest
+	readCompleted    *atomic.Bool
+	uncompressedSize *atomic.Int64
 }

 // DiffID returns the digest of uncompressed blob.
@@ -145,6 +163,19 @@ func (b *Blob) TOCDigest() digest.Digest {
 	return b.tocDigest
 }

+// UncompressedSize returns the size of uncompressed blob.
+// UncompressedSize should only be called after the blob has been fully read.
+func (b *Blob) UncompressedSize() (int64, error) {
+	switch {
+	case b.uncompressedSize == nil || b.readCompleted == nil:
+		return -1, fmt.Errorf("readCompleted or uncompressedSize is not initialized")
+	case !b.readCompleted.Load():
+		return -1, fmt.Errorf("called UncompressedSize before the blob has been fully read")
+	default:
+		return b.uncompressedSize.Load(), nil
+	}
+}
+
 // Build builds an eStargz blob which is an extended version of stargz, from a blob (gzip, zstd
 // or plain tar) passed through the argument. If there are some prioritized files are listed in
 // the option, these files are grouped as "prioritized" and can be used for runtime optimization
@@ -186,7 +217,7 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
 			rErr = fmt.Errorf("error from context %q: %w", cErr, rErr)
 		}
 	}()
-	tarBlob, err := decompressBlob(tarBlob, layerFiles)
+	tarBlob, err := decompressBlob(tarBlob, layerFiles, opts.gzipHelperFunc)
 	if err != nil {
 		return nil, err
 	}
@@ -252,17 +283,28 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
 	}
 	diffID := digest.Canonical.Digester()
 	pr, pw := io.Pipe()
+	readCompleted := new(atomic.Bool)
+	uncompressedSize := new(atomic.Int64)
 	go func() {
-		r, err := opts.compression.Reader(io.TeeReader(io.MultiReader(append(rs, tocAndFooter)...), pw))
+		var size int64
+		var decompressFunc func(io.Reader) (io.ReadCloser, error)
+		if _, ok := opts.compression.(*gzipCompression); ok && opts.gzipHelperFunc != nil {
+			decompressFunc = opts.gzipHelperFunc
+		} else {
+			decompressFunc = opts.compression.Reader
+		}
+		decompressR, err := decompressFunc(io.TeeReader(io.MultiReader(append(rs, tocAndFooter)...), pw))
 		if err != nil {
 			pw.CloseWithError(err)
 			return
 		}
-		defer r.Close()
-		if _, err := io.Copy(diffID.Hash(), r); err != nil {
+		defer decompressR.Close()
+		if size, err = io.Copy(diffID.Hash(), decompressR); err != nil {
 			pw.CloseWithError(err)
 			return
 		}
+		uncompressedSize.Store(size)
+		readCompleted.Store(true)
 		pw.Close()
 	}()
 	return &Blob{
@@ -270,8 +312,10 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) {
 			Reader:    pr,
 			closeFunc: layerFiles.CleanupAll,
 		},
-		tocDigest: tocDgst,
-		diffID:    diffID,
+		tocDigest:        tocDgst,
+		diffID:           diffID,
+		readCompleted:    readCompleted,
+		uncompressedSize: uncompressedSize,
 	}, nil
 }

@@ -366,8 +410,9 @@ func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]stri

 	// Sort the tar file respecting to the prioritized files list.
 	sorted := &tarFile{}
+	picked := make(map[string]struct{})
 	for _, l := range prioritized {
-		if err := moveRec(l, intar, sorted); err != nil {
+		if err := moveRec(l, intar, sorted, picked); err != nil {
 			if errors.Is(err, errNotFound) && missedPrioritized != nil {
 				*missedPrioritized = append(*missedPrioritized, l)
 				continue // allow not found
@@ -395,8 +440,8 @@ func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]stri
 		})
 	}

-	// Dump all entry and concatinate them.
-	return append(sorted.dump(), intar.dump()...), nil
+	// Dump prioritized entries followed by the rest entries while skipping picked ones.
+	return append(sorted.dump(nil), intar.dump(picked)...), nil
 }

 // readerFromEntries returns a reader of tar archive that contains entries passed
@@ -408,11 +453,11 @@ func readerFromEntries(entries ...*entry) io.Reader {
 		defer tw.Close()
 		for _, entry := range entries {
 			if err := tw.WriteHeader(entry.header); err != nil {
-				pw.CloseWithError(fmt.Errorf("Failed to write tar header: %v", err))
+				pw.CloseWithError(fmt.Errorf("failed to write tar header: %v", err))
 				return
 			}
 			if _, err := io.Copy(tw, entry.payload); err != nil {
-				pw.CloseWithError(fmt.Errorf("Failed to write tar payload: %v", err))
+				pw.CloseWithError(fmt.Errorf("failed to write tar payload: %v", err))
 				return
 			}
 		}
@@ -458,36 +503,42 @@ func importTar(in io.ReaderAt) (*tarFile, error) {
 	return tf, nil
 }

-func moveRec(name string, in *tarFile, out *tarFile) error {
+func moveRec(name string, in *tarFile, out *tarFile, picked map[string]struct{}) error {
 	name = cleanEntryName(name)
 	if name == "" { // root directory. stop recursion.
 		if e, ok := in.get(name); ok {
 			// entry of the root directory exists. we should move it as well.
 			// this case will occur if tar entries are prefixed with "./", "/", etc.
-			out.add(e)
-			in.remove(name)
+			if _, done := picked[name]; !done {
+				out.add(e)
+				picked[name] = struct{}{}
+			}
 		}
 		return nil
 	}

 	_, okIn := in.get(name)
 	_, okOut := out.get(name)
-	if !okIn && !okOut {
+	_, okPicked := picked[name]
+	if !okIn && !okOut && !okPicked {
 		return fmt.Errorf("file: %q: %w", name, errNotFound)
 	}

 	parent, _ := path.Split(strings.TrimSuffix(name, "/"))
-	if err := moveRec(parent, in, out); err != nil {
+	if err := moveRec(parent, in, out, picked); err != nil {
 		return err
 	}
 	if e, ok := in.get(name); ok && e.header.Typeflag == tar.TypeLink {
-		if err := moveRec(e.header.Linkname, in, out); err != nil {
+		if err := moveRec(e.header.Linkname, in, out, picked); err != nil {
 			return err
 		}
 	}
+	if _, done := picked[name]; done {
+		return nil
+	}
 	if e, ok := in.get(name); ok {
 		out.add(e)
-		in.remove(name)
+		picked[name] = struct{}{}
 	}
 	return nil
 }
@@ -533,8 +584,18 @@ func (f *tarFile) get(name string) (e *entry, ok bool) {
 	return
 }

-func (f *tarFile) dump() []*entry {
-	return f.stream
+func (f *tarFile) dump(skip map[string]struct{}) []*entry {
+	if len(skip) == 0 {
+		return f.stream
+	}
+	var out []*entry
+	for _, e := range f.stream {
+		if _, ok := skip[cleanEntryName(e.header.Name)]; ok {
+			continue
+		}
+		out = append(out, e)
+	}
+	return out
 }

 type readCloser struct {
@@ -627,12 +688,12 @@ func (cr *countReadSeeker) Seek(offset int64, whence int) (int64, error) {

 	switch whence {
 	default:
-		return 0, fmt.Errorf("Unknown whence: %v", whence)
+		return 0, fmt.Errorf("unknown whence: %v", whence)
 	case io.SeekStart:
 	case io.SeekCurrent:
 		offset += *cr.cPos
 	case io.SeekEnd:
-		return 0, fmt.Errorf("Unsupported whence: %v", whence)
+		return 0, fmt.Errorf("unsupported whence: %v", whence)
 	}

 	if offset < 0 {
@@ -649,7 +710,7 @@ func (cr *countReadSeeker) currentPos() int64 {
 	return *cr.cPos
 }

-func decompressBlob(org *io.SectionReader, tmp *tempFiles) (*io.SectionReader, error) {
+func decompressBlob(org *io.SectionReader, tmp *tempFiles, gzipHelperFunc GzipHelperFunc) (*io.SectionReader, error) {
 	if org.Size() < 4 {
 		return org, nil
 	}
@@ -660,7 +721,13 @@ func decompressBlob(org *io.SectionReader, tmp *tempFiles) (*io.SectionReader, e
 	var dR io.Reader
 	if bytes.Equal([]byte{0x1F, 0x8B, 0x08}, src[:3]) {
 		// gzip
-		dgR, err := gzip.NewReader(io.NewSectionReader(org, 0, org.Size()))
+		var dgR io.ReadCloser
+		var err error
+		if gzipHelperFunc != nil {
+			dgR, err = gzipHelperFunc(io.NewSectionReader(org, 0, org.Size()))
+		} else {
+			dgR, err = gzip.NewReader(io.NewSectionReader(org, 0, org.Size()))
+		}
 		if err != nil {
 			return nil, err
 		}
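Reviewer note, not part of the patch: a minimal sketch of how a caller might drive the build.go surface changed above. estargz.Build, WithPrioritizedFiles, WithGzipHelperFunc and Blob.UncompressedSize come from this diff; the input filename and the use of klauspost/pgzip as the faster gzip helper are assumptions for illustration.

    package main

    import (
    	"io"
    	"log"
    	"os"

    	"github.com/containerd/stargz-snapshotter/estargz"
    	"github.com/klauspost/pgzip" // assumed faster gzip reader; any io.ReadCloser source works
    )

    func main() {
    	f, err := os.Open("layer.tar.gz") // hypothetical gzip-compressed layer
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer f.Close()
    	fi, err := f.Stat()
    	if err != nil {
    		log.Fatal(err)
    	}

    	blob, err := estargz.Build(
    		io.NewSectionReader(f, 0, fi.Size()),
    		// Overlapping prioritized paths are now recorded in the picked map
    		// and emitted only once.
    		estargz.WithPrioritizedFiles([]string{"etc/", "etc/passwd"}),
    		// Used instead of stdlib gzip when the layer is gzip-compressed.
    		estargz.WithGzipHelperFunc(func(r io.Reader) (io.ReadCloser, error) {
    			return pgzip.NewReader(r)
    		}),
    	)
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer blob.Close()

    	// UncompressedSize is only valid once the blob has been fully read.
    	if _, err := io.Copy(io.Discard, blob); err != nil {
    		log.Fatal(err)
    	}
    	n, err := blob.UncompressedSize()
    	if err != nil {
    		log.Fatal(err)
    	}
    	log.Printf("diffID=%s uncompressed=%d bytes", blob.DiffID(), n)
    }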
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go
index f4d554655..ff91a37ad 100644
--- a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go
@@ -307,6 +307,15 @@ func (r *Reader) initFields() error {
 		}
 	}

+	if len(r.m) == 0 {
+		r.m[""] = &TOCEntry{
+			Name:    "",
+			Type:    "dir",
+			Mode:    0755,
+			NumLink: 1,
+		}
+	}
+
 	return nil
 }
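Reviewer note, not part of the patch: the initFields hunk above synthesizes a root directory entry when a TOC has no entries, so Lookup("") now succeeds even for empty layers (the testutil change further down asserts exactly this). A minimal sketch under the assumption that blob holds a complete eStargz layer:

    package main

    import (
    	"bytes"
    	"io"
    	"log"

    	"github.com/containerd/stargz-snapshotter/estargz"
    )

    // inspectRoot shows the guarantee added above: even a layer whose TOC has
    // no entries exposes a synthesized root directory.
    func inspectRoot(blob []byte) {
    	r, err := estargz.Open(io.NewSectionReader(bytes.NewReader(blob), 0, int64(len(blob))))
    	if err != nil {
    		log.Fatal(err)
    	}
    	root, ok := r.Lookup("")
    	if !ok {
    		log.Fatal("root directory not found")
    	}
    	log.Println(root.Type, root.Mode, root.NumLink) // "dir", 493 (0755), 1
    }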
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go
index f24afe32f..88fa13b19 100644
--- a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go
@@ -109,7 +109,7 @@ func gzipFooterBytes(tocOff int64) []byte {
 	header[0], header[1] = 'S', 'G'
 	subfield := fmt.Sprintf("%016xSTARGZ", tocOff)
 	binary.LittleEndian.PutUint16(header[2:4], uint16(len(subfield))) // little-endian per RFC1952
-	gz.Header.Extra = append(header, []byte(subfield)...)
+	gz.Extra = append(header, []byte(subfield)...)
 	gz.Close()
 	if buf.Len() != FooterSize {
 		panic(fmt.Sprintf("footer buffer = %d, not %d", buf.Len(), FooterSize))
@@ -136,7 +136,7 @@ func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, t
 		return 0, 0, 0, err
 	}
 	defer zr.Close()
-	extra := zr.Header.Extra
+	extra := zr.Extra
 	si1, si2, subfieldlen, subfield := extra[0], extra[1], extra[2:4], extra[4:]
 	if si1 != 'S' || si2 != 'G' {
 		return 0, 0, 0, fmt.Errorf("invalid subfield IDs: %q, %q; want E, S", si1, si2)
@@ -181,7 +181,7 @@ func (gz *LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOff
 		return 0, 0, 0, fmt.Errorf("legacy: failed to get footer gzip reader: %w", err)
 	}
 	defer zr.Close()
-	extra := zr.Header.Extra
+	extra := zr.Extra
 	if len(extra) != 16+len("STARGZ") {
 		return 0, 0, 0, fmt.Errorf("legacy: invalid stargz's extra field size")
 	}
diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
index ba650b4d1..ff165e090 100644
--- a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
+++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go
@@ -38,7 +38,6 @@ import (
 	"reflect"
 	"sort"
 	"strings"
-	"testing"
 	"time"

 	"github.com/containerd/stargz-snapshotter/estargz/errorutil"
@@ -49,16 +48,48 @@ import (
 // TestingController is Compression with some helper methods necessary for testing.
 type TestingController interface {
 	Compression
-	TestStreams(t *testing.T, b []byte, streams []int64)
-	DiffIDOf(*testing.T, []byte) string
+	TestStreams(t TestingT, b []byte, streams []int64)
+	DiffIDOf(TestingT, []byte) string
 	String() string
 }

+// TestingT is the minimal set of testing.T required to run the
+// tests defined in CompressionTestSuite. This interface exists to prevent
+// leaking the testing package from being exposed outside tests.
+type TestingT interface {
+	Errorf(format string, args ...any)
+	FailNow()
+	Failed() bool
+	Fatal(args ...any)
+	Fatalf(format string, args ...any)
+	Logf(format string, args ...any)
+	Parallel()
+}
+
+// Runner allows running subtests of TestingT. This exists instead of adding
+// a Run method to TestingT interface because the Run implementation of
+// testing.T would not satisfy the interface.
+type Runner func(t TestingT, name string, fn func(t TestingT))
+
+type TestRunner struct {
+	TestingT
+	Runner Runner
+}
+
+func (r *TestRunner) Run(name string, run func(*TestRunner)) {
+	r.Runner(r.TestingT, name, func(t TestingT) {
+		run(&TestRunner{TestingT: t, Runner: r.Runner})
+	})
+}
+
 // CompressionTestSuite tests this pkg with controllers can build valid eStargz blobs and parse them.
-func CompressionTestSuite(t *testing.T, controllers ...TestingControllerFactory) {
-	t.Run("testBuild", func(t *testing.T) { t.Parallel(); testBuild(t, controllers...) })
-	t.Run("testDigestAndVerify", func(t *testing.T) { t.Parallel(); testDigestAndVerify(t, controllers...) })
-	t.Run("testWriteAndOpen", func(t *testing.T) { t.Parallel(); testWriteAndOpen(t, controllers...) })
+func CompressionTestSuite(t *TestRunner, controllers ...TestingControllerFactory) {
+	t.Run("testBuild", func(t *TestRunner) { t.Parallel(); testBuild(t, controllers...) })
+	t.Run("testDigestAndVerify", func(t *TestRunner) {
+		t.Parallel()
+		testDigestAndVerify(t, controllers...)
+	})
+	t.Run("testWriteAndOpen", func(t *TestRunner) { t.Parallel(); testWriteAndOpen(t, controllers...) })
 }

 type TestingControllerFactory func() TestingController
@@ -79,7 +110,7 @@ var allowedPrefix = [4]string{"", "./", "/", "../"}

 // testBuild tests the resulting stargz blob built by this pkg has the same
 // contents as the normal stargz blob.
-func testBuild(t *testing.T, controllers ...TestingControllerFactory) {
+func testBuild(t *TestRunner, controllers ...TestingControllerFactory) {
 	tests := []struct {
 		name      string
 		chunkSize int
@@ -165,7 +196,7 @@ func testBuild(t *testing.T, controllers ...TestingControllerFactory) {
 			prefix := prefix
 			for _, minChunkSize := range tt.minChunkSize {
 				minChunkSize := minChunkSize
-				t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s,minChunkSize=%d", newCL(), prefix, srcCompression, srcTarFormat, minChunkSize), func(t *testing.T) {
+				t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s,minChunkSize=%d", newCL(), prefix, srcCompression, srcTarFormat, minChunkSize), func(t *TestRunner) {
 					tarBlob := buildTar(t, tt.in, prefix, srcTarFormat)
 					// Test divideEntries()
 					entries, err := sortEntries(tarBlob, nil, nil) // identical order
@@ -265,7 +296,7 @@ func testBuild(t *testing.T, controllers ...TestingControllerFactory) {
 	}
 }

-func isSameTarGz(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool {
+func isSameTarGz(t TestingT, cla TestingController, a []byte, clb TestingController, b []byte) bool {
 	aGz, err := cla.Reader(bytes.NewReader(a))
 	if err != nil {
 		t.Fatalf("failed to read A")
@@ -325,7 +356,7 @@ func isSameTarGz(t *testing.T, cla TestingController, a []byte, clb TestingContr
 	return true
 }

-func isSameVersion(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool {
+func isSameVersion(t TestingT, cla TestingController, a []byte, clb TestingController, b []byte) bool {
 	aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), cla)
 	if err != nil {
 		t.Fatalf("failed to parse A: %v", err)
@@ -339,7 +370,7 @@ func isSameVersion(t *testing.T, cla TestingController, a []byte, clb TestingCon
 	return aJTOC.Version == bJTOC.Version
 }

-func isSameEntries(t *testing.T, a, b *Reader) bool {
+func isSameEntries(t TestingT, a, b *Reader) bool {
 	aroot, ok := a.Lookup("")
 	if !ok {
 		t.Fatalf("failed to get root of A")
@@ -353,18 +384,19 @@ func isSameEntries(t *testing.T, a, b *Reader) bool {
 	return contains(t, aEntry, bEntry) && contains(t, bEntry, aEntry)
 }

-func compressBlob(t *testing.T, src *io.SectionReader, srcCompression int) *io.SectionReader {
+func compressBlob(t TestingT, src *io.SectionReader, srcCompression int) *io.SectionReader {
 	buf := new(bytes.Buffer)
 	var w io.WriteCloser
 	var err error
-	if srcCompression == gzipType {
+	switch srcCompression {
+	case gzipType:
 		w = gzip.NewWriter(buf)
-	} else if srcCompression == zstdType {
+	case zstdType:
 		w, err = zstd.NewWriter(buf)
 		if err != nil {
 			t.Fatalf("failed to init zstd writer: %v", err)
 		}
-	} else {
+	default:
 		return src
 	}
 	src.Seek(0, io.SeekStart)
@@ -386,7 +418,7 @@ type stargzEntry struct {

 // contains checks if all child entries in "b" are also contained in "a".
 // This function also checks if the files/chunks contain the same contents among "a" and "b".
-func contains(t *testing.T, a, b stargzEntry) bool {
+func contains(t TestingT, a, b stargzEntry) bool {
 	ae, ar := a.e, a.r
 	be, br := b.e, b.r
 	t.Logf("Comparing: %q vs %q", ae.Name, be.Name)
@@ -445,7 +477,7 @@ func contains(t *testing.T, a, b stargzEntry) bool {
 			bbytes, bnext, bok := readOffset(t, bf, nr, b)
 			if !aok && !bok {
 				break
-			} else if !(aok && bok) || anext != bnext {
+			} else if !aok || !bok || anext != bnext {
 				t.Logf("%q != %q (offset=%d): chunk existence a=%v vs b=%v, anext=%v vs bnext=%v",
 					ae.Name, be.Name, nr, aok, bok, anext, bnext)
 				return false
@@ -497,7 +529,7 @@ func equalEntry(a, b *TOCEntry) bool {
 		a.Digest == b.Digest
 }

-func readOffset(t *testing.T, r *io.SectionReader, offset int64, e stargzEntry) ([]byte, int64, bool) {
+func readOffset(t TestingT, r *io.SectionReader, offset int64, e stargzEntry) ([]byte, int64, bool) {
 	ce, ok := e.r.ChunkEntryForOffset(e.e.Name, offset)
 	if !ok {
 		return nil, 0, false
@@ -516,7 +548,7 @@ func readOffset(t *testing.T, r *io.SectionReader, offset int64, e stargzEntry)
 	return data[:n], offset + ce.ChunkSize, true
 }

-func dumpTOCJSON(t *testing.T, tocJSON *JTOC) string {
+func dumpTOCJSON(t TestingT, tocJSON *JTOC) string {
 	jtocData, err := json.Marshal(*tocJSON)
 	if err != nil {
 		t.Fatalf("failed to marshal TOC JSON: %v", err)
@@ -530,20 +562,19 @@ func dumpTOCJSON(t *testing.T, tocJSON *JTOC) string {

 const chunkSize = 3

-// type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, compressionLevel int)
-type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory)
+type check func(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory)

 // testDigestAndVerify runs specified checks against sample stargz blobs.
-func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) {
+func testDigestAndVerify(t *TestRunner, controllers ...TestingControllerFactory) {
 	tests := []struct {
 		name         string
-		tarInit      func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry)
+		tarInit      func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry)
 		checks       []check
 		minChunkSize []int
 	}{
 		{
 			name: "no-regfile",
-			tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
+			tarInit: func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) {
 				return tarOf(
 					dir("test/"),
 				)
@@ -558,7 +589,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory)
 		},
 		{
 			name: "small-files",
-			tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
+			tarInit: func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) {
 				return tarOf(
 					regDigest(t, "baz.txt", "", dgstMap),
 					regDigest(t, "foo.txt", "a", dgstMap),
@@ -582,7 +613,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory)
 		},
 		{
 			name: "big-files",
-			tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
+			tarInit: func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) {
 				return tarOf(
 					regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap),
 					regDigest(t, "foo.txt", "a", dgstMap),
@@ -606,7 +637,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory)
 		{
 			name:         "with-non-regfiles",
 			minChunkSize: []int{0, 64000},
-			tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) {
+			tarInit: func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) {
 				return tarOf(
 					regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap),
 					regDigest(t, "foo.txt", "a", dgstMap),
@@ -653,7 +684,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory)
 			srcTarFormat := srcTarFormat
 			for _, minChunkSize := range tt.minChunkSize {
 				minChunkSize := minChunkSize
-				t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s,minChunkSize=%d", newCL(), prefix, srcTarFormat, minChunkSize), func(t *testing.T) {
+				t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s,minChunkSize=%d", newCL(), prefix, srcTarFormat, minChunkSize), func(t *TestRunner) {
 					// Get original tar file and chunk digests
 					dgstMap := make(map[string]digest.Digest)
 					tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat)
@@ -689,7 +720,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory)
 // checkStargzTOC checks the TOC JSON of the passed stargz has the expected
 // digest and contains valid chunks. It walks all entries in the stargz and
 // checks all chunk digests stored to the TOC JSON match the actual contents.
-func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+func checkStargzTOC(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
 	sgz, err := Open(
 		io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
 		WithDecompressors(controller),
@@ -800,7 +831,7 @@ func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstM
 // checkVerifyTOC checks the verification works for the TOC JSON of the passed
 // stargz. It walks all entries in the stargz and checks the verifications for
 // all chunks work.
-func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+func checkVerifyTOC(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
 	sgz, err := Open(
 		io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
 		WithDecompressors(controller),
@@ -881,9 +912,9 @@ func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstM
 // checkVerifyInvalidTOCEntryFail checks if misconfigured TOC JSON can be
 // detected during the verification and the verification returns an error.
 func checkVerifyInvalidTOCEntryFail(filename string) check {
-	return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+	return func(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
 		funcs := map[string]rewriteFunc{
-			"lost digest in a entry": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) {
+			"lost digest in a entry": func(t TestingT, toc *JTOC, sgz *io.SectionReader) {
 				var found bool
 				for _, e := range toc.Entries {
 					if cleanEntryName(e.Name) == filename {
@@ -901,7 +932,7 @@ func checkVerifyInvalidTOCEntryFail(filename string) check {
 					t.Fatalf("rewrite target not found")
 				}
 			},
-			"duplicated entry offset": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) {
+			"duplicated entry offset": func(t TestingT, toc *JTOC, sgz *io.SectionReader) {
 				var (
 					sampleEntry *TOCEntry
 					targetEntry *TOCEntry
@@ -928,7 +959,7 @@ func checkVerifyInvalidTOCEntryFail(filename string) check {
 		}

 		for name, rFunc := range funcs {
-			t.Run(name, func(t *testing.T) {
+			t.Run(name, func(t *TestRunner) {
 				newSgz, newTocDigest := rewriteTOCJSON(t, io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), rFunc, controller)
 				buf := new(bytes.Buffer)
 				if _, err := io.Copy(buf, newSgz); err != nil {
@@ -957,7 +988,7 @@ func checkVerifyInvalidTOCEntryFail(filename string) check {
 // checkVerifyInvalidStargzFail checks if the verification detects that the
 // given stargz file doesn't match to the expected digest and returns error.
 func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check {
-	return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+	return func(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
 		cl := newController()
 		rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(cl))
 		if err != nil {
@@ -989,7 +1020,7 @@ func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check {
 // checkVerifyBrokenContentFail checks if the verifier detects broken contents
 // that doesn't match to the expected digest and returns error.
 func checkVerifyBrokenContentFail(filename string) check {
-	return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
+	return func(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) {
 		// Parse stargz file
 		sgz, err := Open(
 			io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
@@ -1046,9 +1077,9 @@ func chunkID(name string, offset, size int64) string {
 	return fmt.Sprintf("%s-%d-%d", cleanEntryName(name), offset, size)
 }

-type rewriteFunc func(t *testing.T, toc *JTOC, sgz *io.SectionReader)
+type rewriteFunc func(t TestingT, toc *JTOC, sgz *io.SectionReader)

-func rewriteTOCJSON(t *testing.T, sgz *io.SectionReader, rewrite rewriteFunc, controller TestingController) (newSgz io.Reader, tocDigest digest.Digest) {
+func rewriteTOCJSON(t TestingT, sgz *io.SectionReader, rewrite rewriteFunc, controller TestingController) (newSgz io.Reader, tocDigest digest.Digest) {
 	decodedJTOC, jtocOffset, err := parseStargz(sgz, controller)
 	if err != nil {
 		t.Fatalf("failed to extract TOC JSON: %v", err)
@@ -1119,7 +1150,7 @@ func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJT
 	return decodedJTOC, tocOffset, nil
 }

-func testWriteAndOpen(t *testing.T, controllers ...TestingControllerFactory) {
+func testWriteAndOpen(t *TestRunner, controllers ...TestingControllerFactory) {
 	const content = "Some contents"
 	invalidUtf8 := "\xff\xfe\xfd"

@@ -1463,7 +1494,7 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingControllerFactory) {
 		for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
 			srcTarFormat := srcTarFormat
 			for _, lossless := range []bool{true, false} {
-				t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", newCL(), prefix, lossless, srcTarFormat), func(t *testing.T) {
+				t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", newCL(), prefix, lossless, srcTarFormat), func(t *TestRunner) {
 					var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat)
 					origTarDgstr := digest.Canonical.Digester()
 					tr = io.TeeReader(tr, origTarDgstr.Hash())
@@ -1529,6 +1560,9 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingControllerFactory) {
 					if err != nil {
 						t.Fatalf("stargz.Open: %v", err)
 					}
+					if _, ok := r.Lookup(""); !ok {
+						t.Fatalf("failed to lookup rootdir: %v", err)
+					}
 					wantTOCVersion := 1
 					if tt.wantTOCVersion > 0 {
 						wantTOCVersion = tt.wantTOCVersion
@@ -1627,7 +1661,7 @@ func digestFor(content string) string {

 type numTOCEntries int

-func (n numTOCEntries) check(t *testing.T, r *Reader) {
+func (n numTOCEntries) check(t TestingT, r *Reader) {
 	if r.toc == nil {
 		t.Fatal("nil TOC")
 	}
@@ -1647,15 +1681,15 @@ func (n numTOCEntries) check(t *testing.T, r *Reader) {
 func checks(s ...stargzCheck) []stargzCheck { return s }

 type stargzCheck interface {
-	check(t *testing.T, r *Reader)
+	check(t TestingT, r *Reader)
 }

-type stargzCheckFn func(*testing.T, *Reader)
+type stargzCheckFn func(TestingT, *Reader)

-func (f stargzCheckFn) check(t *testing.T, r *Reader) { f(t, r) }
+func (f stargzCheckFn) check(t TestingT, r *Reader) { f(t, r) }

 func maxDepth(max int) stargzCheck {
-	return stargzCheckFn(func(t *testing.T, r *Reader) {
+	return stargzCheckFn(func(t TestingT, r *Reader) {
 		e, ok := r.Lookup("")
 		if !ok {
 			t.Fatal("root directory not found")
@@ -1672,7 +1706,7 @@ func maxDepth(max int) stargzCheck {
 	})
 }

-func getMaxDepth(t *testing.T, e *TOCEntry, current, limit int) (max int, rErr error) {
+func getMaxDepth(t TestingT, e *TOCEntry, current, limit int) (max int, rErr error) {
 	if current > limit {
 		return -1, fmt.Errorf("walkMaxDepth: exceeds limit: current:%d > limit:%d",
 			current, limit)
@@ -1694,7 +1728,7 @@ func getMaxDepth(t *testing.T, e *TOCEntry, current, limit int) (max int, rErr e
 }

 func hasFileLen(file string, wantLen int) stargzCheck {
-	return stargzCheckFn(func(t *testing.T, r *Reader) {
+	return stargzCheckFn(func(t TestingT, r *Reader) {
 		for _, ent := range r.toc.Entries {
 			if ent.Name == file {
 				if ent.Type != "reg" {
@@ -1710,7 +1744,7 @@ func hasFileLen(file string, wantLen int) stargzCheck {
 }

 func hasFileXattrs(file, name, value string) stargzCheck {
-	return stargzCheckFn(func(t *testing.T, r *Reader) {
+	return stargzCheckFn(func(t TestingT, r *Reader) {
 		for _, ent := range r.toc.Entries {
 			if ent.Name == file {
 				if ent.Type != "reg" {
@@ -1737,7 +1771,7 @@ func hasFileXattrs(file, name, value string) stargzCheck {
 }

 func hasFileDigest(file string, digest string) stargzCheck {
-	return stargzCheckFn(func(t *testing.T, r *Reader) {
+	return stargzCheckFn(func(t TestingT, r *Reader) {
 		ent, ok := r.Lookup(file)
 		if !ok {
 			t.Fatalf("didn't find TOCEntry for file %q", file)
@@ -1749,7 +1783,7 @@ func hasFileDigest(file string, digest string) stargzCheck {
 }

 func hasFileContentsWithPreRead(file string, offset int, want string, extra ...chunkInfo) stargzCheck {
-	return stargzCheckFn(func(t *testing.T, r *Reader) {
+	return stargzCheckFn(func(t TestingT, r *Reader) {
 		extraMap := make(map[string]chunkInfo)
 		for _, e := range extra {
 			extraMap[e.name] = e
@@ -1796,7 +1830,7 @@ func hasFileContentsWithPreRead(file string, offset int, want string, extra ...c
 }

 func hasFileContentsRange(file string, offset int, want string) stargzCheck {
-	return stargzCheckFn(func(t *testing.T, r *Reader) {
+	return stargzCheckFn(func(t TestingT, r *Reader) {
 		f, err := r.OpenFile(file)
 		if err != nil {
 			t.Fatal(err)
@@ -1813,7 +1847,7 @@ func hasFileContentsRange(file string, offset int, want string) stargzCheck {
 }

 func hasChunkEntries(file string, wantChunks int) stargzCheck {
-	return stargzCheckFn(func(t *testing.T, r *Reader) {
+	return stargzCheckFn(func(t TestingT, r *Reader) {
 		ent, ok := r.Lookup(file)
 		if !ok {
 			t.Fatalf("no file for %q", file)
@@ -1857,7 +1891,7 @@ func hasChunkEntries(file string, wantChunks int) stargzCheck {
 }

 func entryHasChildren(dir string, want ...string) stargzCheck {
-	return stargzCheckFn(func(t *testing.T, r *Reader) {
+	return stargzCheckFn(func(t TestingT, r *Reader) {
 		want := append([]string(nil), want...)
 		var got []string
 		ent, ok := r.Lookup(dir)
@@ -1876,7 +1910,7 @@ func entryHasChildren(dir string, want ...string) stargzCheck {
 }

 func hasDir(file string) stargzCheck {
-	return stargzCheckFn(func(t *testing.T, r *Reader) {
+	return stargzCheckFn(func(t TestingT, r *Reader) {
 		for _, ent := range r.toc.Entries {
 			if ent.Name == cleanEntryName(file) {
 				if ent.Type != "dir" {
@@ -1890,7 +1924,7 @@ func hasDir(file string) stargzCheck {
 }

 func hasDirLinkCount(file string, count int) stargzCheck {
-	return stargzCheckFn(func(t *testing.T, r *Reader) {
+	return stargzCheckFn(func(t TestingT, r *Reader) {
 		for _, ent := range r.toc.Entries {
 			if ent.Name == cleanEntryName(file) {
 				if ent.Type != "dir" {
@@ -1908,7 +1942,7 @@ func hasDirLinkCount(file string, count int) stargzCheck {
 }

 func hasMode(file string, mode os.FileMode) stargzCheck {
-	return stargzCheckFn(func(t *testing.T, r *Reader) {
+	return stargzCheckFn(func(t TestingT, r *Reader) {
 		for _, ent := range r.toc.Entries {
 			if ent.Name == cleanEntryName(file) {
 				if ent.Stat().Mode() != mode {
@@ -1923,7 +1957,7 @@ func hasMode(file string, mode os.FileMode) stargzCheck {
 }

 func hasSymlink(file, target string) stargzCheck {
-	return stargzCheckFn(func(t *testing.T, r *Reader) {
+	return stargzCheckFn(func(t TestingT, r *Reader) {
 		for _, ent := range r.toc.Entries {
 			if ent.Name == file {
 				if ent.Type != "symlink" {
@@ -1939,7 +1973,7 @@ func hasSymlink(file, target string) stargzCheck {
 }

 func lookupMatch(name string, want *TOCEntry) stargzCheck {
-	return stargzCheckFn(func(t *testing.T, r *Reader) {
+	return stargzCheckFn(func(t TestingT, r *Reader) {
 		e, ok := r.Lookup(name)
 		if !ok {
 			t.Fatalf("failed to Lookup entry %q", name)
@@ -1952,7 +1986,7 @@ func lookupMatch(name string, want *TOCEntry) stargzCheck {
 }

 func hasEntryOwner(entry string, owner owner) stargzCheck {
-	return stargzCheckFn(func(t *testing.T, r *Reader) {
+	return stargzCheckFn(func(t TestingT, r *Reader) {
 		ent, ok := r.Lookup(strings.TrimSuffix(entry, "/"))
 		if !ok {
 			t.Errorf("entry %q not found", entry)
@@ -1966,7 +2000,7 @@ func hasEntryOwner(entry string, owner owner) stargzCheck {
 }

 func mustSameEntry(files ...string) stargzCheck {
-	return stargzCheckFn(func(t *testing.T, r *Reader) {
+	return stargzCheckFn(func(t TestingT, r *Reader) {
 		var first *TOCEntry
 		for _, f := range files {
 			if first == nil {
@@ -2038,7 +2072,7 @@ func (f tarEntryFunc) appendTar(tw *tar.Writer, prefix string, format tar.Format
 	return f(tw, prefix, format)
 }

-func buildTar(t *testing.T, ents []tarEntry, prefix string, opts ...interface{}) *io.SectionReader {
+func buildTar(t TestingT, ents []tarEntry, prefix string, opts ...interface{}) *io.SectionReader {
 	format := tar.FormatUnknown
 	for _, opt := range opts {
 		switch v := opt.(type) {
@@ -2247,7 +2281,7 @@ func noPrefetchLandmark() tarEntry {
 	})
 }

-func regDigest(t *testing.T, name string, contentStr string, digestMap map[string]digest.Digest) tarEntry {
+func regDigest(t TestingT, name string, contentStr string, digestMap map[string]digest.Digest) tarEntry {
 	if digestMap == nil {
 		t.Fatalf("digest map mustn't be nil")
 	}
@@ -2317,7 +2351,7 @@ func (f fileInfoOnlyMode) ModTime() time.Time { return time.Now() }
 func (f fileInfoOnlyMode) IsDir() bool        { return os.FileMode(f).IsDir() }
 func (f fileInfoOnlyMode) Sys() interface{}   { return nil }

-func CheckGzipHasStreams(t *testing.T, b []byte, streams []int64) {
+func CheckGzipHasStreams(t TestingT, b []byte, streams []int64) {
 	if len(streams) == 0 {
 		return // nop
 	}
@@ -2346,8 +2380,8 @@ func CheckGzipHasStreams(t *testing.T, b []byte, streams []int64) {
 			t.Fatalf("countStreams(gzip), Copy: %v", err)
 		}
 		var extra string
-		if len(zr.Header.Extra) > 0 {
-			extra = fmt.Sprintf("; extra=%q", zr.Header.Extra)
+		if len(zr.Extra) > 0 {
+			extra = fmt.Sprintf("; extra=%q", zr.Extra)
 		}
 		t.Logf("  [%d] at %d in stargz, uncompressed length %d%s", numStreams, zoff, n, extra)
 		delete(wants, int64(zoff))
@@ -2355,7 +2389,7 @@ func CheckGzipHasStreams(t *testing.T, b []byte, streams []int64) {
 	}
 }

-func GzipDiffIDOf(t *testing.T, b []byte) string {
+func GzipDiffIDOf(t TestingT, b []byte) string {
 	h := sha256.New()
 	zr, err := gzip.NewReader(bytes.NewReader(b))
 	if err != nil {
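Reviewer note, not part of the patch: the testutil.go rewrite above swaps *testing.T for the TestingT/Runner indirection so the testing package no longer leaks out of the exported API. A plausible adapter in a consumer's _test.go, assuming the suite is still driven from a real *testing.T (this is the shape the Runner type implies, since testing.T's own Run method cannot satisfy TestingT directly):

    package estargz_test

    import (
    	"testing"

    	"github.com/containerd/stargz-snapshotter/estargz"
    )

    // runWithT bridges *testing.T subtests into the Runner indirection.
    func runWithT(t estargz.TestingT, name string, fn func(estargz.TestingT)) {
    	t.(*testing.T).Run(name, func(t *testing.T) { fn(t) })
    }

    func TestCompression(t *testing.T) {
    	estargz.CompressionTestSuite(
    		&estargz.TestRunner{TestingT: t, Runner: runWithT},
    		// TestingControllerFactory values under test go here.
    	)
    }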
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/config.go b/vendor/github.com/google/go-containerregistry/pkg/v1/config.go
index 960c93b5f..b62d84826 100644
--- a/vendor/github.com/google/go-containerregistry/pkg/v1/config.go
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/config.go
@@ -27,10 +27,11 @@ import (
 // docker_version and os.version are not part of the spec but included
 // for backwards compatibility.
 type ConfigFile struct {
-	Architecture  string    `json:"architecture"`
-	Author        string    `json:"author,omitempty"`
-	Container     string    `json:"container,omitempty"`
-	Created       Time      `json:"created,omitempty"`
+	Architecture string `json:"architecture"`
+	Author       string `json:"author,omitempty"`
+	Container    string `json:"container,omitempty"`
+	Created      Time   `json:"created,omitempty"`
+	// Deprecated: This field is deprecated and will be removed in the next release.
 	DockerVersion string    `json:"docker_version,omitempty"`
 	History       []History `json:"history,omitempty"`
 	OS            string    `json:"os"`
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go
index c04479600..409877bce 100644
--- a/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/mutate/mutate.go
@@ -514,7 +514,7 @@ func Canonical(img v1.Image) (v1.Image, error) {

 	cfg.Container = ""
 	cfg.Config.Hostname = ""
-	cfg.DockerVersion = ""
+	cfg.DockerVersion = "" //nolint:staticcheck // Field will be removed in next release

 	return ConfigFile(img, cfg)
 }
diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go
index 99a2bb2eb..15b7da1e4 100644
--- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go
+++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go
@@ -162,9 +162,14 @@ func makeOptions(opts ...Option) (*options, error) {
 		o.transport = transport.NewLogger(o.transport)
 	}

-	// Wrap the transport in something that can retry network flakes.
-	o.transport = transport.NewRetry(o.transport, transport.WithRetryPredicate(defaultRetryPredicate), transport.WithRetryStatusCodes(o.retryStatusCodes...))
+	// Using customized retry predicate if provided, and fallback to default if not.
+	predicate := o.retryPredicate
+	if predicate == nil {
+		predicate = defaultRetryPredicate
+	}

+	// Wrap the transport in something that can retry network flakes.
+	o.transport = transport.NewRetry(o.transport, transport.WithRetryBackoff(o.retryBackoff), transport.WithRetryPredicate(predicate), transport.WithRetryStatusCodes(o.retryStatusCodes...))
 	// Wrap this last to prevent transport.New from double-wrapping.
 	if o.userAgent != "" {
 		o.transport = transport.NewUserAgent(o.transport, o.userAgent)
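Reviewer note, not part of the patch: before this fix, makeOptions always installed defaultRetryPredicate, so a predicate set through the exported WithRetryPredicate option was silently ignored (and the retry backoff was not wired through either). A hedged sketch of the call path this unblocks; the reference and the timeout-only policy are illustrative assumptions:

    package main

    import (
    	"errors"
    	"log"
    	"net"

    	"github.com/google/go-containerregistry/pkg/name"
    	"github.com/google/go-containerregistry/pkg/v1/remote"
    )

    func main() {
    	ref, err := name.ParseReference("registry.example.com/repo/app:latest") // hypothetical image
    	if err != nil {
    		log.Fatal(err)
    	}
    	// With the change above, this predicate is actually honored.
    	img, err := remote.Image(ref, remote.WithRetryPredicate(func(err error) bool {
    		var netErr net.Error
    		return errors.As(err, &netErr) && netErr.Timeout() // retry only on timeouts
    	}))
    	if err != nil {
    		log.Fatal(err)
    	}
    	_ = img
    }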
if o.userAgent != "" { o.transport = transport.NewUserAgent(o.transport, o.userAgent) diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go index 482a4adee..d38e67624 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go @@ -25,7 +25,7 @@ import ( ) // Error implements error to support the following error specification: -// https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors +// https://github.com/distribution/distribution/blob/aac2f6c8b7c5a6c60190848bab5cbeed2b5ba0a9/docs/spec/api.md#errors type Error struct { Errors []Diagnostic `json:"errors,omitempty"` // The http status code returned. @@ -111,7 +111,7 @@ func (d Diagnostic) String() string { type ErrorCode string // The set of error conditions a registry may return: -// https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors-2 +// https://github.com/distribution/distribution/blob/aac2f6c8b7c5a6c60190848bab5cbeed2b5ba0a9/docs/spec/api.md#errors-2 const ( BlobUnknownErrorCode ErrorCode = "BLOB_UNKNOWN" BlobUploadInvalidErrorCode ErrorCode = "BLOB_UPLOAD_INVALID" @@ -170,7 +170,7 @@ func CheckError(resp *http.Response, codes ...int) error { } func makeError(resp *http.Response, body []byte) *Error { - // https://github.com/docker/distribution/blob/master/docs/spec/api.md#errors + // https://github.com/distribution/distribution/blob/aac2f6c8b7c5a6c60190848bab5cbeed2b5ba0a9/docs/spec/api.md#errors structuredError := &Error{} // This can fail if e.g. the response body is not valid JSON. That's fine, diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go index e82fa3bb7..d58b3fe42 100644 --- a/vendor/github.com/klauspost/compress/fse/bitwriter.go +++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go @@ -143,7 +143,7 @@ func (b *bitWriter) flush32() { // flushAlign will flush remaining full bytes and align to next byte boundary. func (b *bitWriter) flushAlign() { nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { + for i := range nbBytes { b.out = append(b.out, byte(b.bitContainer>>(i*8))) } b.nBits = 0 diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go index 074018d8f..8c8baa4fc 100644 --- a/vendor/github.com/klauspost/compress/fse/compress.go +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -396,7 +396,7 @@ func (s *Scratch) buildCTable() error { if v > largeLimit { s.zeroBits = true } - for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + for range v { tableSymbol[position] = symbol position = (position + step) & tableMask for position > highThreshold { diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go index 0ebc9aaac..41db94cde 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -85,7 +85,7 @@ func (b *bitWriter) flush32() { // flushAlign will flush remaining full bytes and align to next byte boundary. 
func (b *bitWriter) flushAlign() { nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { + for i := range nbBytes { b.out = append(b.out, byte(b.bitContainer>>(i*8))) } b.nBits = 0 diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go index 84aa3d12f..a97cf1b5d 100644 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -276,7 +276,7 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) { offsetIdx := len(s.Out) s.Out = append(s.Out, sixZeros[:]...) - for i := 0; i < 4; i++ { + for i := range 4 { toDo := src if len(toDo) > segmentSize { toDo = toDo[:segmentSize] @@ -312,7 +312,7 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { segmentSize := (len(src) + 3) / 4 var wg sync.WaitGroup wg.Add(4) - for i := 0; i < 4; i++ { + for i := range 4 { toDo := src if len(toDo) > segmentSize { toDo = toDo[:segmentSize] @@ -326,7 +326,7 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { }(i) } wg.Wait() - for i := 0; i < 4; i++ { + for i := range 4 { o := s.tmpOut[i] if len(o) > math.MaxUint16 { // We cannot store the size in the jump table diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 0f56b02d7..7d0efa881 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -626,7 +626,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { var br [4]bitReaderBytes start := 6 - for i := 0; i < 3; i++ { + for i := range 3 { length := int(src[i*2]) | (int(src[i*2+1]) << 8) if start+length >= len(src) { return nil, errors.New("truncated input (or invalid offset)") @@ -798,10 +798,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } + endsAt := min(offset+remainBytes, len(out)) br := &br[i] bitsLeft := br.remaining() for bitsLeft > 0 { @@ -864,7 +861,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { var br [4]bitReaderBytes start := 6 - for i := 0; i < 3; i++ { + for i := range 3 { length := int(src[i*2]) | (int(src[i*2+1]) << 8) if start+length >= len(src) { return nil, errors.New("truncated input (or invalid offset)") @@ -1035,10 +1032,7 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } + endsAt := min(offset+remainBytes, len(out)) br := &br[i] bitsLeft := br.remaining() for bitsLeft > 0 { diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go index ba7e8e6b0..99ddd4af9 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go @@ -58,7 +58,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { var br [4]bitReaderShifted // Decode "jump table" start := 6 - for i := 0; i < 3; i++ { + for i := range 3 { length := int(src[i*2]) | (int(src[i*2+1]) << 8) if start+length >= len(src) { return 
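Reviewer note, not part of the patch: the klauspost/compress hunks here are mechanical modernizations — C-style counting loops become Go 1.22 range-over-int, and manual clamping becomes the Go 1.21 min builtin — with no behavior change. A tiny standalone equivalence check:

    package main

    import "fmt"

    func main() {
    	nbBytes := uint8(4)
    	// Go 1.22 range-over-int: i takes the values 0..nbBytes-1 and has
    	// nbBytes' type, exactly like the replaced "for i := uint8(0); ..." loop.
    	for i := range nbBytes {
    		fmt.Println(i) // 0 1 2 3
    	}
    	// Go 1.21 min builtin, as used for the endsAt clamping above:
    	// endsAt := min(offset+remainBytes, len(out))
    	fmt.Println(min(35, 16)) // 16
    }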
nil, errors.New("truncated input (or invalid offset)") @@ -109,10 +109,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } + endsAt := min(offset+remainBytes, len(out)) br := &br[i] bitsLeft := br.remaining() for bitsLeft > 0 { diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go index 77ecd68e0..67d9e05b6 100644 --- a/vendor/github.com/klauspost/compress/huff0/huff0.go +++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -201,7 +201,7 @@ func (c cTable) write(s *Scratch) error { for i := range hist[:16] { hist[i] = 0 } - for n := uint8(0); n < maxSymbolValue; n++ { + for n := range maxSymbolValue { v := bitsToWeight[c[n].nBits] & 15 huffWeight[n] = v hist[v]++ @@ -271,7 +271,7 @@ func (c cTable) estTableSize(s *Scratch) (sz int, err error) { for i := range hist[:16] { hist[i] = 0 } - for n := uint8(0); n < maxSymbolValue; n++ { + for n := range maxSymbolValue { v := bitsToWeight[c[n].nBits] & 15 huffWeight[n] = v hist[v]++ diff --git a/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go b/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go index 0cfb5c0e2..4f2a0d8c5 100644 --- a/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go +++ b/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go @@ -37,6 +37,6 @@ func Store32(b []byte, v uint32) { } // Store64 will store v at b. -func Store64(b []byte, v uint64) { - binary.LittleEndian.PutUint64(b, v) +func Store64[I Indexer](b []byte, i I, v uint64) { + binary.LittleEndian.PutUint64(b[i:], v) } diff --git a/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go b/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go index ada45cd90..218a38bc4 100644 --- a/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go +++ b/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go @@ -38,18 +38,15 @@ func Load64[I Indexer](b []byte, i I) uint64 { // Store16 will store v at b. func Store16(b []byte, v uint16) { - //binary.LittleEndian.PutUint16(b, v) *(*uint16)(unsafe.Pointer(unsafe.SliceData(b))) = v } // Store32 will store v at b. func Store32(b []byte, v uint32) { - //binary.LittleEndian.PutUint32(b, v) *(*uint32)(unsafe.Pointer(unsafe.SliceData(b))) = v } -// Store64 will store v at b. -func Store64(b []byte, v uint64) { - //binary.LittleEndian.PutUint64(b, v) - *(*uint64)(unsafe.Pointer(unsafe.SliceData(b))) = v +// Store64 will store v at b[i:]. 
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode.go b/vendor/github.com/klauspost/compress/internal/snapref/decode.go
index 40796a49d..a2c82fcd2 100644
--- a/vendor/github.com/klauspost/compress/internal/snapref/decode.go
+++ b/vendor/github.com/klauspost/compress/internal/snapref/decode.go
@@ -209,7 +209,7 @@ func (r *Reader) fill() error {
 			if !r.readFull(r.buf[:len(magicBody)], false) {
 				return r.err
 			}
-			for i := 0; i < len(magicBody); i++ {
+			for i := range len(magicBody) {
 				if r.buf[i] != magicBody[i] {
 					r.err = ErrCorrupt
 					return r.err
diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode.go b/vendor/github.com/klauspost/compress/internal/snapref/encode.go
index 13c6040a5..860a99416 100644
--- a/vendor/github.com/klauspost/compress/internal/snapref/encode.go
+++ b/vendor/github.com/klauspost/compress/internal/snapref/encode.go
@@ -20,8 +20,10 @@ import (
 func Encode(dst, src []byte) []byte {
 	if n := MaxEncodedLen(len(src)); n < 0 {
 		panic(ErrTooLarge)
-	} else if len(dst) < n {
+	} else if cap(dst) < n {
 		dst = make([]byte, n)
+	} else {
+		dst = dst[:n]
 	}
 
 	// The block starts with the varint-encoded length of the decompressed bytes.
diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go
index 1952f175b..b22b297e6 100644
--- a/vendor/github.com/klauspost/compress/zstd/bitwriter.go
+++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go
@@ -88,7 +88,7 @@ func (b *bitWriter) flush32() {
 // flushAlign will flush remaining full bytes and align to next byte boundary.
 func (b *bitWriter) flushAlign() {
 	nbBytes := (b.nBits + 7) >> 3
-	for i := uint8(0); i < nbBytes; i++ {
+	for i := range nbBytes {
 		b.out = append(b.out, byte(b.bitContainer>>(i*8)))
 	}
 	b.nBits = 0
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
index 0dd742fd2..2329e996f 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -54,11 +54,11 @@ const (
 )
 
 var (
-	huffDecoderPool = sync.Pool{New: func() interface{} {
+	huffDecoderPool = sync.Pool{New: func() any {
 		return &huff0.Scratch{}
 	}}
 
-	fseDecoderPool = sync.Pool{New: func() interface{} {
+	fseDecoderPool = sync.Pool{New: func() any {
 		return &fseDecoder{}
 	}}
 )
@@ -553,7 +553,7 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
 		if compMode&3 != 0 {
 			return errors.New("corrupt block: reserved bits not zero")
 		}
-		for i := uint(0); i < 3; i++ {
+		for i := range uint(3) {
 			mode := seqCompMode((compMode >> (6 - i*2)) & 3)
 			if debugDecoder {
 				println("Table", tableIndex(i), "is", mode)
diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go
index ea2a19376..30df5513d 100644
--- a/vendor/github.com/klauspost/compress/zstd/decoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/decoder.go
@@ -373,11 +373,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
 		if cap(dst) == 0 && !d.o.limitToCap {
 			// Allocate len(input) * 2 by default if nothing is provided
 			// and we didn't get frame content size.
-			size := len(input) * 2
-			// Cap to 1 MB.
-			if size > 1<<20 {
-				size = 1 << 20
-			}
+			size := min(
+				// Cap to 1 MB.
+				len(input)*2, 1<<20)
 			if uint64(size) > d.o.maxDecodedSize {
 				size = int(d.o.maxDecodedSize)
 			}
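
Note: two allocation changes above share one idiom: snapref.Encode now reallocates only when dst lacks capacity (re-slicing otherwise), and DecodeAll's default preallocation becomes the min of twice the input and 1 MiB. A small sketch of the capacity-reuse pattern; ensureLen is an illustrative helper, not the vendored API:

package main

import "fmt"

// ensureLen returns a slice of length n, reusing dst's backing array
// whenever its capacity suffices and allocating only when it does not.
func ensureLen(dst []byte, n int) []byte {
	if cap(dst) < n {
		return make([]byte, n)
	}
	return dst[:n]
}

func main() {
	buf := make([]byte, 0, 64)
	out := ensureLen(buf, 32)
	fmt.Println(len(out), cap(out)) // 32 64: no new allocation

	// The default decode-buffer sizing follows the same min() shape.
	input := make([]byte, 4<<20)
	size := min(len(input)*2, 1<<20)
	fmt.Println(size) // 1048576: capped at 1 MiB
}
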
diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go
index b7b83164b..2ffbfdf37 100644
--- a/vendor/github.com/klauspost/compress/zstd/dict.go
+++ b/vendor/github.com/klauspost/compress/zstd/dict.go
@@ -194,17 +194,17 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
 	hist := o.History
 	contents := o.Contents
 	debug := o.DebugOut != nil
-	println := func(args ...interface{}) {
+	println := func(args ...any) {
 		if o.DebugOut != nil {
 			fmt.Fprintln(o.DebugOut, args...)
 		}
 	}
-	printf := func(s string, args ...interface{}) {
+	printf := func(s string, args ...any) {
 		if o.DebugOut != nil {
 			fmt.Fprintf(o.DebugOut, s, args...)
 		}
 	}
-	print := func(args ...interface{}) {
+	print := func(args ...any) {
 		if o.DebugOut != nil {
 			fmt.Fprint(o.DebugOut, args...)
 		}
@@ -424,16 +424,10 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
 	}
 
 	// Literal table
-	avgSize := litTotal
-	if avgSize > huff0.BlockSizeMax/2 {
-		avgSize = huff0.BlockSizeMax / 2
-	}
+	avgSize := min(litTotal, huff0.BlockSizeMax/2)
 	huffBuff := make([]byte, 0, avgSize)
 	// Target size
-	div := litTotal / avgSize
-	if div < 1 {
-		div = 1
-	}
+	div := max(litTotal/avgSize, 1)
 	if debug {
 		println("Huffman weights:")
 	}
@@ -454,7 +448,7 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
 		huffBuff = append(huffBuff, 255)
 	}
 	scratch := &huff0.Scratch{TableLog: 11}
-	for tries := 0; tries < 255; tries++ {
+	for tries := range 255 {
 		scratch = &huff0.Scratch{TableLog: 11}
 		_, _, err = huff0.Compress1X(huffBuff, scratch)
 		if err == nil {
@@ -471,7 +465,7 @@ func BuildDict(o BuildDictOptions) ([]byte, error) {
 		// Bail out.... Just generate something
 		huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...)
-		for i := 0; i < 128; i++ {
+		for i := range 128 {
 			huffBuff = append(huffBuff, byte(i))
 		}
 		continue
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go
index 7d250c67f..c1192ec38 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_base.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go
@@ -8,7 +8,7 @@ import (
 )
 
 const (
-	dictShardBits = 6
+	dictShardBits = 7
 )
 
 type fastBase struct {
@@ -41,11 +41,9 @@ func (e *fastBase) AppendCRC(dst []byte) []byte {
 // or a window size small enough to contain the input size, if > 0.
 func (e *fastBase) WindowSize(size int64) int32 {
 	if size > 0 && size < int64(e.maxMatchOff) {
-		b := int32(1) << uint(bits.Len(uint(size)))
-		// Keep minimum window.
-		if b < 1024 {
-			b = 1024
-		}
+		b := max(
+			// Keep minimum window.
+			int32(1)<<uint(bits.Len(uint(size))), 1024)
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_best.go b/vendor/github.com/klauspost/compress/zstd/enc_best.go
 		for offset > tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength {
 			s--
 			offset--
 		}
@@ -382,10 +377,7 @@ encodeLoop:
 	nextEmit = s
 
 	// Index skipped...
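
Note: the WindowSize rewrite folds the power-of-two rounding and the 1 KiB floor into a single max() expression. An illustrative reconstruction under simplified names; the receiver and remaining guard logic of the vendored method are omitted:

package main

import (
	"fmt"
	"math/bits"
)

// windowSize picks a power of two no smaller than size, but never below
// the minimum window of 1024 bytes.
func windowSize(size int64) int32 {
	return max(
		// Keep minimum window.
		int32(1)<<uint(bits.Len(uint(size))), 1024)
}

func main() {
	fmt.Println(windowSize(100))  // 1024: the floor applies
	fmt.Println(windowSize(5000)) // 8192: next power of two
}
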
-	end := s
-	if s > sLimit+4 {
-		end = sLimit + 4
-	}
+	end := min(s, sLimit+4)
 	off := index0 + e.cur
 	for index0 < end {
 		cv0 := load6432(src, index0)
@@ -444,10 +436,7 @@ encodeLoop:
 	nextEmit = s
 
 	// Index old s + 1 -> s - 1 or sLimit
-	end := s
-	if s > sLimit-4 {
-		end = sLimit - 4
-	}
+	end := min(s, sLimit-4)
 	off := index0 + e.cur
 	for index0 < end {
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go
index 84a79fde7..85dcd28c3 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_better.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go
@@ -190,10 +190,7 @@ encodeLoop:
 			// and have to do special offset treatment.
 			startLimit := nextEmit + 1
 
-			tMin := s - e.maxMatchOff
-			if tMin < 0 {
-				tMin = 0
-			}
+			tMin := max(s-e.maxMatchOff, 0)
 			for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
 				repIndex--
 				start--
@@ -252,10 +249,7 @@ encodeLoop:
 			// and have to do special offset treatment.
 			startLimit := nextEmit + 1
 
-			tMin := s - e.maxMatchOff
-			if tMin < 0 {
-				tMin = 0
-			}
+			tMin := max(s-e.maxMatchOff, 0)
 			for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
 				repIndex--
 				start--
@@ -480,10 +474,7 @@ encodeLoop:
 		l := matched
 
 		// Extend backwards
-		tMin := s - e.maxMatchOff
-		if tMin < 0 {
-			tMin = 0
-		}
+		tMin := max(s-e.maxMatchOff, 0)
 		for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
 			s--
 			t--
@@ -719,10 +710,7 @@ encodeLoop:
 			// and have to do special offset treatment.
 			startLimit := nextEmit + 1
 
-			tMin := s - e.maxMatchOff
-			if tMin < 0 {
-				tMin = 0
-			}
+			tMin := max(s-e.maxMatchOff, 0)
 			for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
 				repIndex--
 				start--
@@ -783,10 +771,7 @@ encodeLoop:
 			// and have to do special offset treatment.
 			startLimit := nextEmit + 1
 
-			tMin := s - e.maxMatchOff
-			if tMin < 0 {
-				tMin = 0
-			}
+			tMin := max(s-e.maxMatchOff, 0)
 			for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
 				repIndex--
 				start--
@@ -1005,10 +990,7 @@ encodeLoop:
 		l := matched
 
 		// Extend backwards
-		tMin := s - e.maxMatchOff
-		if tMin < 0 {
-			tMin = 0
-		}
+		tMin := max(s-e.maxMatchOff, 0)
 		for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
 			s--
 			t--
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
index d36be7bd8..cf8cad00d 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go
@@ -13,7 +13,7 @@ const (
 	dFastLongLen = 8 // Bytes used for table hash
 
 	dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table
-	dLongTableShardSize = dFastLongTableSize / tableShardCnt       // Size of an individual shard
+	dLongTableShardSize = dFastLongTableSize / dLongTableShardCnt  // Size of an individual shard
 
 	dFastShortTableBits = tableBits                // Bits used in the short match table
 	dFastShortTableSize = 1 << dFastShortTableBits // Size of the table
@@ -149,10 +149,7 @@ encodeLoop:
 			// and have to do special offset treatment.
 			startLimit := nextEmit + 1
 
-			tMin := s - e.maxMatchOff
-			if tMin < 0 {
-				tMin = 0
-			}
+			tMin := max(s-e.maxMatchOff, 0)
 			for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
 				repIndex--
 				start--
@@ -266,10 +263,7 @@ encodeLoop:
 		l := e.matchlen(s+4, t+4, src) + 4
 
 		// Extend backwards
-		tMin := s - e.maxMatchOff
-		if tMin < 0 {
-			tMin = 0
-		}
+		tMin := max(s-e.maxMatchOff, 0)
 		for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
 			s--
 			t--
@@ -462,10 +456,7 @@ encodeLoop:
 			// and have to do special offset treatment.
 			startLimit := nextEmit + 1
 
-			tMin := s - e.maxMatchOff
-			if tMin < 0 {
-				tMin = 0
-			}
+			tMin := max(s-e.maxMatchOff, 0)
 			for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] {
 				repIndex--
 				start--
@@ -576,10 +567,7 @@ encodeLoop:
 		l := int32(matchLen(src[s+4:], src[t+4:])) + 4
 
 		// Extend backwards
-		tMin := s - e.maxMatchOff
-		if tMin < 0 {
-			tMin = 0
-		}
+		tMin := max(s-e.maxMatchOff, 0)
 		for t > tMin && s > nextEmit && src[t-1] == src[s-1] {
 			s--
 			t--
@@ -809,10 +797,7 @@ encodeLoop:
 			// and have to do special offset treatment.
 			startLimit := nextEmit + 1
 
-			tMin := s - e.maxMatchOff
-			if tMin < 0 {
-				tMin = 0
-			}
+			tMin := max(s-e.maxMatchOff, 0)
 			for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 {
 				repIndex--
 				start--
@@ -927,10 +912,7 @@ encodeLoop:
 		l := e.matchlen(s+4, t+4, src) + 4
 
 		// Extend backwards
-		tMin := s - e.maxMatchOff
-		if tMin < 0 {
-			tMin = 0
-		}
+		tMin := max(s-e.maxMatchOff, 0)
 		for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
 			s--
 			t--
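
Note: the constant fix at the top of enc_dfast.go is a genuine bug fix, not a cleanup: the long table's shard size was computed from the short table's shard count. A worked example; the concrete bit widths are assumptions for illustration, only the relationship matters:

package main

import "fmt"

const (
	dictShardBits      = 7
	dFastLongTableBits = 17 // assumed: 2^17 long-table entries
	tableBits          = 15 // assumed: 2^15 short-table entries

	dFastLongTableSize = 1 << dFastLongTableBits
	tableShardCnt      = 1 << (tableBits - dictShardBits)
	dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits)
)

func main() {
	wrong := dFastLongTableSize / tableShardCnt      // divided by the short table's shard count
	right := dFastLongTableSize / dLongTableShardCnt // one shard per long-table slice
	fmt.Println(wrong, right)                        // 512 128: the old size was 4x too large
}
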
diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
index f45a3da7d..9180a3a58 100644
--- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go
+++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go
@@ -143,10 +143,7 @@ encodeLoop:
 			// and have to do special offset treatment.
 			startLimit := nextEmit + 1
 
-			sMin := s - e.maxMatchOff
-			if sMin < 0 {
-				sMin = 0
-			}
+			sMin := max(s-e.maxMatchOff, 0)
 			for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch {
 				repIndex--
 				start--
@@ -223,10 +220,7 @@ encodeLoop:
 		l := e.matchlen(s+4, t+4, src) + 4
 
 		// Extend backwards
-		tMin := s - e.maxMatchOff
-		if tMin < 0 {
-			tMin = 0
-		}
+		tMin := max(s-e.maxMatchOff, 0)
 		for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
 			s--
 			t--
@@ -387,10 +381,7 @@ encodeLoop:
 			// and have to do special offset treatment.
 			startLimit := nextEmit + 1
 
-			sMin := s - e.maxMatchOff
-			if sMin < 0 {
-				sMin = 0
-			}
+			sMin := max(s-e.maxMatchOff, 0)
 			for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] {
 				repIndex--
 				start--
@@ -469,10 +460,7 @@ encodeLoop:
 		l := e.matchlen(s+4, t+4, src) + 4
 
 		// Extend backwards
-		tMin := s - e.maxMatchOff
-		if tMin < 0 {
-			tMin = 0
-		}
+		tMin := max(s-e.maxMatchOff, 0)
 		for t > tMin && s > nextEmit && src[t-1] == src[s-1] {
 			s--
 			t--
@@ -655,10 +643,7 @@ encodeLoop:
 			// and have to do special offset treatment.
 			startLimit := nextEmit + 1
 
-			sMin := s - e.maxMatchOff
-			if sMin < 0 {
-				sMin = 0
-			}
+			sMin := max(s-e.maxMatchOff, 0)
 			for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch {
 				repIndex--
 				start--
@@ -735,10 +720,7 @@ encodeLoop:
 		l := e.matchlen(s+4, t+4, src) + 4
 
 		// Extend backwards
-		tMin := s - e.maxMatchOff
-		if tMin < 0 {
-			tMin = 0
-		}
+		tMin := max(s-e.maxMatchOff, 0)
 		for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength {
 			s--
 			t--
diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go
index e47af66e7..d88f067e5 100644
--- a/vendor/github.com/klauspost/compress/zstd/framedec.go
+++ b/vendor/github.com/klauspost/compress/zstd/framedec.go
@@ -238,10 +238,7 @@ func (d *frameDec) reset(br byteBuffer) error {
 	if d.WindowSize == 0 && d.SingleSegment {
 		// We may not need window in this case.
-		d.WindowSize = d.FrameContentSize
-		if d.WindowSize < MinWindowSize {
-			d.WindowSize = MinWindowSize
-		}
+		d.WindowSize = max(d.FrameContentSize, MinWindowSize)
 	}
 	if d.WindowSize > d.o.maxDecodedSize {
 		if debugDecoder {
 			printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize)
diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
index ab26326a8..3a0f4e7fb 100644
--- a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
+++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go
@@ -149,7 +149,7 @@ func (s *fseEncoder) buildCTable() error {
 		if v > largeLimit {
 			s.zeroBits = true
 		}
-		for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ {
+		for range v {
 			tableSymbol[position] = symbol
 			position = (position + step) & tableMask
 			for position > highThreshold {
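
Note: the fse_encoder hunk uses the bare form of the same Go 1.22 feature: ranging over an integer purely as a repeat count, with no loop variable, and the count may be any integer type (int16 here):

package main

import "fmt"

func main() {
	var v int16 = 3
	// Repeat the body v times without declaring a counter.
	for range v {
		fmt.Println("spread one symbol slot")
	}
}
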
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go
index 9a7de82f9..0bfb0e43c 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go
@@ -231,10 +231,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
 	llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
 	llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
 	out := s.out
-	maxBlockSize := maxCompressedBlockSize
-	if s.windowSize < maxBlockSize {
-		maxBlockSize = s.windowSize
-	}
+	maxBlockSize := min(s.windowSize, maxCompressedBlockSize)
 
 	if debugDecoder {
 		println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream")
diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
index c59f17e07..1f8c3cec2 100644
--- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
+++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go
@@ -79,10 +79,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
 
 	br := s.br
 
-	maxBlockSize := maxCompressedBlockSize
-	if s.windowSize < maxBlockSize {
-		maxBlockSize = s.windowSize
-	}
+	maxBlockSize := min(s.windowSize, maxCompressedBlockSize)
 
 	ctx := decodeSyncAsmContext{
 		llTable: s.litLengths.fse.dt[:maxTablesize],
@@ -237,10 +234,7 @@ func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int
 func (s *sequenceDecs) decode(seqs []seqVals) error {
 	br := s.br
 
-	maxBlockSize := maxCompressedBlockSize
-	if s.windowSize < maxBlockSize {
-		maxBlockSize = s.windowSize
-	}
+	maxBlockSize := min(s.windowSize, maxCompressedBlockSize)
 
 	ctx := decodeAsmContext{
 		llTable: s.litLengths.fse.dt[:maxTablesize],
diff --git a/vendor/github.com/klauspost/compress/zstd/simple_go124.go b/vendor/github.com/klauspost/compress/zstd/simple_go124.go
new file mode 100644
index 000000000..2efc0497b
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/zstd/simple_go124.go
@@ -0,0 +1,56 @@
+// Copyright 2025+ Klaus Post. All rights reserved.
+// License information can be found in the LICENSE file.
+
+//go:build go1.24
+
+package zstd
+
+import (
+	"errors"
+	"runtime"
+	"sync"
+	"weak"
+)
+
+var weakMu sync.Mutex
+var simpleEnc weak.Pointer[Encoder]
+var simpleDec weak.Pointer[Decoder]
+
+// EncodeTo appends the encoded data from src to dst.
+func EncodeTo(dst []byte, src []byte) []byte {
+	weakMu.Lock()
+	enc := simpleEnc.Value()
+	if enc == nil {
+		var err error
+		enc, err = NewWriter(nil, WithEncoderConcurrency(runtime.NumCPU()), WithWindowSize(1<<20), WithLowerEncoderMem(true), WithZeroFrames(true))
+		if err != nil {
+			panic("failed to create simple encoder: " + err.Error())
+		}
+		simpleEnc = weak.Make(enc)
+	}
+	weakMu.Unlock()
+
+	return enc.EncodeAll(src, dst)
+}
+
+// DecodeTo appends the decoded data from src to dst.
+// The maximum decoded size is 1GiB,
+// not including what may already be in dst.
+func DecodeTo(dst []byte, src []byte) ([]byte, error) {
+	weakMu.Lock()
+	dec := simpleDec.Value()
+	if dec == nil {
+		var err error
+		dec, err = NewReader(nil, WithDecoderConcurrency(runtime.NumCPU()), WithDecoderLowmem(true), WithDecoderMaxMemory(1<<30))
+		if err != nil {
+			weakMu.Unlock()
+			return nil, errors.New("failed to create simple decoder: " + err.Error())
+		}
+		runtime.SetFinalizer(dec, func(d *Decoder) {
+			d.Close()
+		})
+		simpleDec = weak.Make(dec)
+	}
+	weakMu.Unlock()
+	return dec.DecodeAll(src, dst)
+}
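
Note: the new simple_go124.go caches one shared Encoder and one shared Decoder behind weak pointers: the singletons are reused while alive, but the GC may still reclaim them when nothing else references them. A minimal sketch of that caching pattern (Go 1.24+; the expensive type is illustrative):

package main

import (
	"fmt"
	"sync"
	"weak"
)

type expensive struct{ id int }

var (
	mu     sync.Mutex
	cached weak.Pointer[expensive]
	built  int
)

// get returns the live cached value if the GC has not collected it,
// and otherwise rebuilds it and re-arms the weak pointer.
func get() *expensive {
	mu.Lock()
	defer mu.Unlock()
	if v := cached.Value(); v != nil {
		return v
	}
	built++
	v := &expensive{id: built}
	cached = weak.Make(v) // does not by itself keep v alive
	return v
}

func main() {
	a, b := get(), get()
	fmt.Println(a == b, built) // true 1: reused while still reachable
}
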
diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go
index a17381b8f..336c28893 100644
--- a/vendor/github.com/klauspost/compress/zstd/snappy.go
+++ b/vendor/github.com/klauspost/compress/zstd/snappy.go
@@ -257,7 +257,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
 			if !r.readFull(r.buf[:len(snappyMagicBody)], false) {
 				return written, r.err
 			}
-			for i := 0; i < len(snappyMagicBody); i++ {
+			for i := range len(snappyMagicBody) {
 				if r.buf[i] != snappyMagicBody[i] {
 					println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i)
 					r.err = ErrSnappyCorrupt
diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go
index 29c15c8c4..3198d7189 100644
--- a/vendor/github.com/klauspost/compress/zstd/zip.go
+++ b/vendor/github.com/klauspost/compress/zstd/zip.go
@@ -19,7 +19,7 @@ const ZipMethodWinZip = 93
 const ZipMethodPKWare = 20
 
 // zipReaderPool is the default reader pool.
-var zipReaderPool = sync.Pool{New: func() interface{} {
+var zipReaderPool = sync.Pool{New: func() any {
 	z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1))
 	if err != nil {
 		panic(err)
diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go
index 6252b46ae..1a869710d 100644
--- a/vendor/github.com/klauspost/compress/zstd/zstd.go
+++ b/vendor/github.com/klauspost/compress/zstd/zstd.go
@@ -98,13 +98,13 @@ var (
 	ErrDecoderNilInput = errors.New("nil input provided as reader")
 )
 
-func println(a ...interface{}) {
+func println(a ...any) {
 	if debug || debugDecoder || debugEncoder {
 		log.Println(a...)
 	}
 }
 
-func printf(format string, a ...interface{}) {
+func printf(format string, a ...any) {
 	if debug || debugDecoder || debugEncoder {
 		log.Printf(format, a...)
 	}
 }
diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/common.go b/vendor/github.com/vbatts/tar-split/archive/tar/common.go
index dee9e47e4..e687a08c9 100644
--- a/vendor/github.com/vbatts/tar-split/archive/tar/common.go
+++ b/vendor/github.com/vbatts/tar-split/archive/tar/common.go
@@ -34,6 +34,7 @@ var (
 	errMissData = errors.New("archive/tar: sparse file references non-existent data")
 	errUnrefData = errors.New("archive/tar: sparse file contains unreferenced data")
 	errWriteHole = errors.New("archive/tar: write non-NUL byte in sparse hole")
+	errSparseTooLong = errors.New("archive/tar: sparse map too long")
 )
 
 type headerError []string
diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go
index 248a7ccb1..a645c4160 100644
--- a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go
+++ b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go
@@ -581,12 +581,17 @@ func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) {
 		cntNewline int64
 		buf        bytes.Buffer
 		blk        block
+		totalSize  int
 	)
 
 	// feedTokens copies data in blocks from r into buf until there are
 	// at least cnt newlines in buf. It will not read more blocks than needed.
 	feedTokens := func(n int64) error {
 		for cntNewline < n {
+			totalSize += len(blk)
+			if totalSize > maxSpecialFileSize {
+				return errSparseTooLong
+			}
 			if _, err := mustReadFull(r, blk[:]); err != nil {
 				return err
 			}
@@ -619,8 +624,8 @@ func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) {
 	}
 
 	// Parse for all member entries.
-	// numEntries is trusted after this since a potential attacker must have
-	// committed resources proportional to what this library used.
+	// numEntries is trusted after this since feedTokens limits the number of
+	// tokens based on maxSpecialFileSize.
 	if err := feedTokens(2 * numEntries); err != nil {
 		return nil, err
 	}
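
Note: the tar-split change bounds how much input feedTokens will consume while scanning a GNU sparse map, so a crafted archive can no longer force unbounded reads. A minimal sketch of the same guard under illustrative names (a 1 KiB cap stands in for maxSpecialFileSize):

package main

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"strings"
)

var errTooLong = errors.New("input exceeds size cap")

// feed reads fixed-size blocks until want newlines have been seen,
// failing once the total consumed would exceed maxBytes.
func feed(r io.Reader, want, maxBytes int) error {
	var (
		blk   [512]byte
		total int
		lines int
	)
	for lines < want {
		total += len(blk)
		if total > maxBytes {
			return errTooLong // bail before reading any further
		}
		if _, err := io.ReadFull(r, blk[:]); err != nil {
			return err
		}
		lines += bytes.Count(blk[:], []byte{'\n'})
	}
	return nil
}

func main() {
	err := feed(strings.NewReader(strings.Repeat("x", 4096)), 1, 1024)
	fmt.Println(err) // input exceeds size cap
}
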
diff --git a/vendor/golang.org/x/oauth2/deviceauth.go b/vendor/golang.org/x/oauth2/deviceauth.go
index e99c92f39..e783a9437 100644
--- a/vendor/golang.org/x/oauth2/deviceauth.go
+++ b/vendor/golang.org/x/oauth2/deviceauth.go
@@ -6,6 +6,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
+	"mime"
 	"net/http"
 	"net/url"
 	"strings"
@@ -116,10 +117,38 @@ func retrieveDeviceAuth(ctx context.Context, c *Config, v url.Values) (*DeviceAuthResponse, error) {
 		return nil, fmt.Errorf("oauth2: cannot auth device: %v", err)
 	}
 	if code := r.StatusCode; code < 200 || code > 299 {
-		return nil, &RetrieveError{
+		retrieveError := &RetrieveError{
 			Response: r,
 			Body:     body,
 		}
+
+		content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))
+		switch content {
+		case "application/x-www-form-urlencoded", "text/plain":
+			// some endpoints return a query string
+			vals, err := url.ParseQuery(string(body))
+			if err != nil {
+				return nil, retrieveError
+			}
+			retrieveError.ErrorCode = vals.Get("error")
+			retrieveError.ErrorDescription = vals.Get("error_description")
+			retrieveError.ErrorURI = vals.Get("error_uri")
+		default:
+			var tj struct {
+				// https://datatracker.ietf.org/doc/html/rfc6749#section-5.2
+				ErrorCode        string `json:"error"`
+				ErrorDescription string `json:"error_description"`
+				ErrorURI         string `json:"error_uri"`
+			}
+			if json.Unmarshal(body, &tj) != nil {
+				return nil, retrieveError
+			}
+			retrieveError.ErrorCode = tj.ErrorCode
+			retrieveError.ErrorDescription = tj.ErrorDescription
+			retrieveError.ErrorURI = tj.ErrorURI
+		}
+
+		return nil, retrieveError
 	}
 	da := &DeviceAuthResponse{}
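
Note: with this deviceauth.go change, failed device-authorization requests surface the RFC 6749 error fields on *oauth2.RetrieveError (parsed from JSON or form-encoded bodies alike) instead of only the raw response. A usage sketch; the endpoint URLs and client ID are placeholders:

package main

import (
	"context"
	"errors"
	"fmt"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID: "client-id",
		Endpoint: oauth2.Endpoint{
			DeviceAuthURL: "https://example.com/device/code",
			TokenURL:      "https://example.com/token",
		},
	}
	_, err := conf.DeviceAuth(context.Background())
	var re *oauth2.RetrieveError
	if errors.As(err, &re) {
		// Populated from JSON or form-encoded error responses alike.
		fmt.Println(re.ErrorCode, re.ErrorDescription, re.ErrorURI)
	}
}
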
diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go
index de34feb84..5c527d31f 100644
--- a/vendor/golang.org/x/oauth2/oauth2.go
+++ b/vendor/golang.org/x/oauth2/oauth2.go
@@ -9,7 +9,6 @@
 package oauth2 // import "golang.org/x/oauth2"
 
 import (
-	"bytes"
 	"context"
 	"errors"
 	"net/http"
@@ -99,7 +98,7 @@ const (
 	// in the POST body as application/x-www-form-urlencoded parameters.
 	AuthStyleInParams AuthStyle = 1
 
-	// AuthStyleInHeader sends the client_id and client_password
+	// AuthStyleInHeader sends the client_id and client_secret
 	// using HTTP Basic Authorization. This is an optional style
 	// described in the OAuth2 RFC 6749 section 2.3.1.
 	AuthStyleInHeader AuthStyle = 2
@@ -158,7 +157,7 @@ func SetAuthURLParam(key, value string) AuthCodeOption {
 // PKCE), https://www.oauth.com/oauth2-servers/pkce/ and
 // https://www.ietf.org/archive/id/draft-ietf-oauth-v2-1-09.html#name-cross-site-request-forgery (describing both approaches)
 func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
-	var buf bytes.Buffer
+	var buf strings.Builder
 	buf.WriteString(c.Endpoint.AuthURL)
 	v := url.Values{
 		"response_type": {"code"},
diff --git a/vendor/golang.org/x/oauth2/pkce.go b/vendor/golang.org/x/oauth2/pkce.go
index cea8374d5..f99384f0f 100644
--- a/vendor/golang.org/x/oauth2/pkce.go
+++ b/vendor/golang.org/x/oauth2/pkce.go
@@ -51,7 +51,7 @@ func S256ChallengeFromVerifier(verifier string) string {
 	return base64.RawURLEncoding.EncodeToString(sha[:])
 }
 
-// S256ChallengeOption derives a PKCE code challenge derived from verifier with
+// S256ChallengeOption derives a PKCE code challenge from the verifier with
 // method S256. It should be passed to [Config.AuthCodeURL] or [Config.DeviceAuth]
 // only.
 func S256ChallengeOption(verifier string) AuthCodeOption {
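
Note: the pkce.go hunk only tightens a doc comment, but it is a useful reminder of how the PKCE helpers fit together. A typical authorization-code flow, with placeholder endpoints and credentials:

package main

import (
	"context"
	"fmt"

	"golang.org/x/oauth2"
)

func main() {
	conf := &oauth2.Config{
		ClientID:    "client-id",
		RedirectURL: "https://example.com/callback",
		Endpoint: oauth2.Endpoint{
			AuthURL:  "https://example.com/auth",
			TokenURL: "https://example.com/token",
		},
	}

	// Generate a verifier, derive its S256 challenge for the auth URL,
	// then present the verifier again when exchanging the code.
	verifier := oauth2.GenerateVerifier()
	url := conf.AuthCodeURL("state", oauth2.S256ChallengeOption(verifier))
	fmt.Println("visit:", url)

	// After the redirect delivers a code:
	code := "received-auth-code"
	tok, err := conf.Exchange(context.Background(), code, oauth2.VerifierOption(verifier))
	_, _ = tok, err
}
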
diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go
index 239ec3296..e995eebb5 100644
--- a/vendor/golang.org/x/oauth2/token.go
+++ b/vendor/golang.org/x/oauth2/token.go
@@ -103,7 +103,7 @@ func (t *Token) WithExtra(extra any) *Token {
 }
 
 // Extra returns an extra field.
-// Extra fields are key-value pairs returned by the server as a
+// Extra fields are key-value pairs returned by the server as
 // part of the token retrieval response.
 func (t *Token) Extra(key string) any {
 	if raw, ok := t.raw.(map[string]any); ok {
diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go
index 8bbebbac9..9922ec331 100644
--- a/vendor/golang.org/x/oauth2/transport.go
+++ b/vendor/golang.org/x/oauth2/transport.go
@@ -58,7 +58,7 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
 var cancelOnce sync.Once
 
 // CancelRequest does nothing. It used to be a legacy cancellation mechanism
-// but now only it only logs on first use to warn that it's deprecated.
+// but now only logs on first use to warn that it's deprecated.
 //
 // Deprecated: use contexts for cancellation instead.
 func (t *Transport) CancelRequest(req *http.Request) {
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 09feda610..b5d42d0a7 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -43,8 +43,8 @@ github.com/chai2010/gettext-go
 github.com/chai2010/gettext-go/mo
 github.com/chai2010/gettext-go/plural
 github.com/chai2010/gettext-go/po
-# github.com/containerd/stargz-snapshotter/estargz v0.16.3
-## explicit; go 1.22.0
+# github.com/containerd/stargz-snapshotter/estargz v0.18.1
+## explicit; go 1.24.0
 github.com/containerd/stargz-snapshotter/estargz
 github.com/containerd/stargz-snapshotter/estargz/errorutil
 # github.com/cpuguy83/go-md2man/v2 v2.0.7
@@ -53,7 +53,7 @@ github.com/cpuguy83/go-md2man/v2/md2man
 # github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
 ## explicit
 github.com/davecgh/go-spew/spew
-# github.com/docker/cli v29.0.2+incompatible
+# github.com/docker/cli v29.0.3+incompatible
 ## explicit
 github.com/docker/cli/cli/config
 github.com/docker/cli/cli/config/configfile
@@ -169,8 +169,8 @@ github.com/google/go-cmp/cmp/internal/diff
 github.com/google/go-cmp/cmp/internal/flags
 github.com/google/go-cmp/cmp/internal/function
 github.com/google/go-cmp/cmp/internal/value
-# github.com/google/go-containerregistry v0.20.6
-## explicit; go 1.24
+# github.com/google/go-containerregistry v0.20.7
+## explicit; go 1.24.0
 github.com/google/go-containerregistry/internal/and
 github.com/google/go-containerregistry/internal/compression
 github.com/google/go-containerregistry/internal/estargz
@@ -223,8 +223,8 @@ github.com/josharian/intern
 # github.com/json-iterator/go v1.1.12
 ## explicit; go 1.12
 github.com/json-iterator/go
-# github.com/klauspost/compress v1.18.0
-## explicit; go 1.22
+# github.com/klauspost/compress v1.18.1
+## explicit; go 1.23
 github.com/klauspost/compress
 github.com/klauspost/compress/fse
 github.com/klauspost/compress/huff0
@@ -385,7 +385,7 @@ github.com/tektoncd/pipeline/pkg/substitution
 # github.com/texttheater/golang-levenshtein/levenshtein v0.0.0-20200805054039-cae8b0eaed6c
 ## explicit; go 1.13
 github.com/texttheater/golang-levenshtein/levenshtein
-# github.com/vbatts/tar-split v0.12.1
+# github.com/vbatts/tar-split v0.12.2
 ## explicit; go 1.17
 github.com/vbatts/tar-split/archive/tar
 # github.com/x448/float16 v0.8.4
@@ -451,8 +451,8 @@ golang.org/x/net/internal/timeseries
 golang.org/x/net/proxy
 golang.org/x/net/trace
 golang.org/x/net/websocket
-# golang.org/x/oauth2 v0.30.0
-## explicit; go 1.23.0
+# golang.org/x/oauth2 v0.33.0
+## explicit; go 1.24.0
 golang.org/x/oauth2
 golang.org/x/oauth2/internal
 # golang.org/x/sync v0.18.0