diff --git a/go.mod b/go.mod index 8853b7fb396..3e09d932ccc 100644 --- a/go.mod +++ b/go.mod @@ -64,9 +64,9 @@ require ( github.com/stretchr/testify v1.11.1 github.com/vbauerster/mpb/v8 v8.10.2 github.com/vishvananda/netlink v1.3.1 - go.podman.io/common v0.66.0 - go.podman.io/image/v5 v5.38.0 - go.podman.io/storage v1.61.0 + go.podman.io/common v0.66.1-0.20251105205746-f394a8a49d5c + go.podman.io/image/v5 v5.38.1-0.20251105205746-f394a8a49d5c + go.podman.io/storage v1.61.1-0.20251105205746-f394a8a49d5c golang.org/x/crypto v0.43.0 golang.org/x/net v0.45.0 golang.org/x/sync v0.17.0 @@ -92,7 +92,7 @@ require ( github.com/containerd/errdefs/pkg v0.3.0 // indirect github.com/containerd/log v0.1.0 // indirect github.com/containerd/platforms v1.0.0-rc.1 // indirect - github.com/containerd/stargz-snapshotter/estargz v0.17.0 // indirect + github.com/containerd/stargz-snapshotter/estargz v0.18.0 // indirect github.com/containerd/typeurl/v2 v2.2.3 // indirect github.com/containernetworking/cni v1.3.0 // indirect github.com/containers/common v0.62.2 // indirect @@ -126,7 +126,7 @@ require ( github.com/hashicorp/go-retryablehttp v0.7.8 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jinzhu/copier v0.4.0 // indirect - github.com/klauspost/compress v1.18.0 // indirect + github.com/klauspost/compress v1.18.1 // indirect github.com/kr/fs v0.1.0 // indirect github.com/letsencrypt/boulder v0.0.0-20240620165639-de9c06129bec // indirect github.com/lufia/plan9stats v0.0.0-20240909124753-873cd0166683 // indirect @@ -134,7 +134,7 @@ require ( github.com/mattn/go-runewidth v0.0.16 // indirect github.com/mdlayher/socket v0.5.1 // indirect github.com/miekg/pkcs11 v1.1.1 // indirect - github.com/mistifyio/go-zfs/v3 v3.1.0 // indirect + github.com/mistifyio/go-zfs/v4 v4.0.0 // indirect github.com/moby/buildkit v0.25.1 // indirect github.com/moby/go-archive v0.1.0 // indirect github.com/moby/patternmatcher v0.6.0 // indirect @@ -146,7 +146,7 @@ require ( github.com/morikuni/aec v1.0.0 // indirect github.com/opencontainers/runc v1.3.2 // indirect github.com/pkg/errors v0.9.1 // indirect - github.com/pkg/sftp v1.13.9 // indirect + github.com/pkg/sftp v1.13.10 // indirect github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect @@ -169,7 +169,7 @@ require ( github.com/tklauser/numcpus v0.10.0 // indirect github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 // indirect github.com/ulikunitz/xz v0.5.15 // indirect - github.com/vbatts/tar-split v0.12.1 // indirect + github.com/vbatts/tar-split v0.12.2 // indirect github.com/vishvananda/netns v0.0.5 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.etcd.io/bbolt v1.4.3 // indirect diff --git a/go.sum b/go.sum index 54c6cb669c9..2bed65f61d6 100644 --- a/go.sum +++ b/go.sum @@ -47,8 +47,8 @@ github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/containerd/platforms v1.0.0-rc.1 h1:83KIq4yy1erSRgOVHNk1HYdPvzdJ5CnsWaRoJX4C41E= github.com/containerd/platforms v1.0.0-rc.1/go.mod h1:J71L7B+aiM5SdIEqmd9wp6THLVRzJGXfNuWCZCllLA4= -github.com/containerd/stargz-snapshotter/estargz v0.17.0 h1:+TyQIsR/zSFI1Rm31EQBwpAA1ovYgIKHy7kctL3sLcE= -github.com/containerd/stargz-snapshotter/estargz v0.17.0/go.mod 
h1:s06tWAiJcXQo9/8AReBCIo/QxcXFZ2n4qfsRnpl71SM= +github.com/containerd/stargz-snapshotter/estargz v0.18.0 h1:Ny5yptQgEXSkDFKvlKJGTvf1YJ+4xD8V+hXqoRG0n74= +github.com/containerd/stargz-snapshotter/estargz v0.18.0/go.mod h1:7hfU1BO2KB3axZl0dRQCdnHrIWw7TRDdK6L44Rdeuo0= github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40= github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk= github.com/containernetworking/cni v1.3.0 h1:v6EpN8RznAZj9765HhXQrtXgX+ECGebEYEmnuFjskwo= @@ -102,8 +102,8 @@ github.com/disiqueira/gotree/v3 v3.0.2 h1:ik5iuLQQoufZBNPY518dXhiO5056hyNBIK9lWh github.com/disiqueira/gotree/v3 v3.0.2/go.mod h1:ZuyjE4+mUQZlbpkI24AmruZKhg3VHEgPLDY8Qk+uUu8= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/cli v28.5.1+incompatible h1:ESutzBALAD6qyCLqbQSEf1a/U8Ybms5agw59yGVc+yY= -github.com/docker/cli v28.5.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= +github.com/docker/cli v28.5.2+incompatible h1:XmG99IHcBmIAoC1PPg9eLBZPlTrNUAijsHLm8PjhBlg= +github.com/docker/cli v28.5.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk= github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM= @@ -221,8 +221,8 @@ github.com/kevinburke/ssh_config v1.4.0 h1:6xxtP5bZ2E4NF5tuQulISpTO2z8XbtH8cg1PW github.com/kevinburke/ssh_config v1.4.0/go.mod h1:q2RIzfka+BXARoNexmF9gkxEX7DmvbW9P4hIVx2Kg4M= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= -github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/klauspost/compress v1.18.1 h1:bcSGx7UbpBqMChDtsF28Lw6v/G94LPrrbMbdC3JH2co= +github.com/klauspost/compress v1.18.1/go.mod h1:ZQFFVG+MdnR0P+l6wpXgIL4NTtwiKIdBnrBd8Nrxr+0= github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU= github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8= @@ -261,8 +261,8 @@ github.com/mfridman/tparse v0.18.0 h1:wh6dzOKaIwkUGyKgOntDW4liXSo37qg5AXbIhkMV3v github.com/mfridman/tparse v0.18.0/go.mod h1:gEvqZTuCgEhPbYk/2lS3Kcxg1GmTxxU7kTC8DvP0i/A= github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/mistifyio/go-zfs/v3 v3.1.0 h1:FZaylcg0hjUp27i23VcJJQiuBeAZjrC8lPqCGM1CopY= -github.com/mistifyio/go-zfs/v3 v3.1.0/go.mod h1:CzVgeB0RvF2EGzQnytKVvVSDwmKJXxkOTUGbNrTja/k= +github.com/mistifyio/go-zfs/v4 v4.0.0 h1:sU0+5dX45tdDK5xNZ3HBi95nxUc48FS92qbIZEvpAg4= +github.com/mistifyio/go-zfs/v4 v4.0.0/go.mod h1:weotFtXTHvBwhr9Mv96KYnDkTPBOHFUbm9cBmQpesL0= github.com/moby/buildkit v0.25.1 h1:j7IlVkeNbEo+ZLoxdudYCHpmTsbwKvhgc/6UJ/mY/o8= github.com/moby/buildkit v0.25.1/go.mod h1:phM8sdqnvgK2y1dPDnbwI6veUCXHOZ6KFSl6E164tkc= github.com/moby/docker-image-spec v1.3.1 
h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= @@ -320,8 +320,8 @@ github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.13.9 h1:4NGkvGudBL7GteO3m6qnaQ4pC0Kvf0onSVc9gR3EWBw= -github.com/pkg/sftp v1.13.9/go.mod h1:OBN7bVXdstkFFN/gdnHPUb5TE8eb8G1Rp9wCItqjkkA= +github.com/pkg/sftp v1.13.10 h1:+5FbKNTe5Z9aspU88DPIKJ9z2KZoaGCu6Sr6kKR/5mU= +github.com/pkg/sftp v1.13.10/go.mod h1:bJ1a7uDhrX/4OII+agvy28lzRvQrmIQuaHrcI1HbeGA= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -383,12 +383,9 @@ github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3A github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 h1:pnnLyeX7o/5aX8qUQ69P/mLojDqwda8hFOCBTmP/6hw= github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6/go.mod h1:39R/xuhNgVhi+K0/zst4TLrJrVmbm6LVgl4A0+ZFS5M= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/sylabs/sif/v2 v2.22.0 h1:Y+xXufp4RdgZe02SR3nWEg7S6q4tPWN237WHYzkDSKA= @@ -417,8 +414,8 @@ github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701 h1:pyC9PaHYZFgEKFdlp3G8 github.com/u-root/uio v0.0.0-20240224005618-d2acac8f3701/go.mod h1:P3a5rG4X7tI17Nn3aOIAYr5HbIMukwXG0urG0WuL8OA= github.com/ulikunitz/xz v0.5.15 h1:9DNdB5s+SgV3bQ2ApL10xRc35ck0DuIX/isZvIk+ubY= github.com/ulikunitz/xz v0.5.15/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= -github.com/vbatts/tar-split v0.12.1 h1:CqKoORW7BUWBe7UL/iqTVvkTBOF8UvOMKOIZykxnnbo= -github.com/vbatts/tar-split v0.12.1/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= +github.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4= +github.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA= github.com/vbauerster/mpb/v8 v8.10.2 h1:2uBykSHAYHekE11YvJhKxYmLATKHAGorZwFlyNw4hHM= github.com/vbauerster/mpb/v8 v8.10.2/go.mod h1:+Ja4P92E3/CorSZgfDtK46D7AVbDqmBQRTmyTqPElo0= github.com/vishvananda/netlink v1.3.1 h1:3AEMt62VKqz90r0tmNhog0r/PpWKmrEShJU0wJW6bV0= @@ -469,12 +466,12 @@ go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKr go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA= go.opentelemetry.io/proto/otlp v1.5.0 
h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= -go.podman.io/common v0.66.0 h1:KElE3HKLFdMdJL+jv5ExBiX2Dh4Qcv8ovmzaBGRsyZM= -go.podman.io/common v0.66.0/go.mod h1:aNd2a0S7pY+fx1X5kpQYuF4hbwLU8ZOccuVrhu7h1Xc= -go.podman.io/image/v5 v5.38.0 h1:aUKrCANkPvze1bnhLJsaubcfz0d9v/bSDLnwsXJm6G4= -go.podman.io/image/v5 v5.38.0/go.mod h1:hSIoIUzgBnmc4DjoIdzk63aloqVbD7QXDMkSE/cvG90= -go.podman.io/storage v1.61.0 h1:5hD/oyRYt1f1gxgvect+8syZBQhGhV28dCw2+CZpx0Q= -go.podman.io/storage v1.61.0/go.mod h1:A3UBK0XypjNZ6pghRhuxg62+2NIm5lcUGv/7XyMhMUI= +go.podman.io/common v0.66.1-0.20251105205746-f394a8a49d5c h1:N9meddDzC7MUfapeEGwZsrANI7SGoOE7PTlv3rsxMHI= +go.podman.io/common v0.66.1-0.20251105205746-f394a8a49d5c/go.mod h1:kiE7arFYBOigjB6XCv8xDKi/HSTQWDu20LpKM/J2giM= +go.podman.io/image/v5 v5.38.1-0.20251105205746-f394a8a49d5c h1:oo7vfOhdZKdPrQvsmXecI6KG7hSEFwNZs13zF3buAPw= +go.podman.io/image/v5 v5.38.1-0.20251105205746-f394a8a49d5c/go.mod h1:spRKro2jNrJUxUb8M7NgCCQu/k0vi/BezbbdELV2JNc= +go.podman.io/storage v1.61.1-0.20251105205746-f394a8a49d5c h1:Sgu/DhA+FFjGDnxi1bFe1VUYQdyzUVNrUWllR3SSUjo= +go.podman.io/storage v1.61.1-0.20251105205746-f394a8a49d5c/go.mod h1:oLjD+/c+LyFgYoKLxXjPd0Wr0q5eAqFEOM4kaujLVvw= go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI= go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= @@ -487,7 +484,6 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/crypto v0.30.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM= diff --git a/pkg/machine/ocipull/ociartifact.go b/pkg/machine/ocipull/ociartifact.go index 9ff1acd1418..16789c00e4b 100644 --- a/pkg/machine/ocipull/ociartifact.go +++ b/pkg/machine/ocipull/ociartifact.go @@ -24,9 +24,6 @@ import ( ) const ( - artifactRegistry = "quay.io" - artifactRepo = "podman" - artifactImageName = "machine-os" artifactOriginalName = specV1.AnnotationTitle machineOS = "linux" ) @@ -92,9 +89,25 @@ func NewOCIArtifactPull(ctx context.Context, dirs *define.MachineDirs, endpoint cache := false if endpoint == "" { - imageName := artifactImageName - endpoint = fmt.Sprintf("docker://%s/%s/%s:%s", artifactRegistry, artifactRepo, imageName, artifactVersion.majorMinor()) - cache = true + return nil, fmt.Errorf("no machine image endpoint provided") + } + + // Automatically append the current version as a tag if endpoint has no tag. + // This allows endpoints in containers.conf to be version-agnostic: they won't need to be + // updated on each version bump, while still pulling the correct version-specific image. 
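+	// For example (illustrative values only, reusing the registry/repo that the
+	// removed constants above pointed at): an untagged endpoint such as
+	//   docker://quay.io/podman/machine-os
+	// would become, for a hypothetical 5.4 client,
+	//   docker://quay.io/podman/machine-os:5.4
+	// while a digest-only reference (...@sha256:...) passes through unchanged.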
+ if image, ok := strings.CutPrefix(endpoint, "docker://"); ok { + ref, err := reference.ParseNormalizedNamed(image) + if err == nil { + // Only add version tag if no tag is specified (digest-only refs are left alone) + if _, hasTag := ref.(reference.Tagged); !hasTag { + taggedRef, err := reference.WithTag(ref, artifactVersion.majorMinor()) + if err != nil { + return nil, fmt.Errorf("failed to add version tag to %q: %w", endpoint, err) + } + endpoint = "docker://" + taggedRef.String() + } + } + // If parsing failed, just continue with the original endpoint } ociDisk := OCIArtifactDisk{ diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go index 8b804b7dde4..16bdc90b202 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go @@ -42,6 +42,8 @@ import ( "golang.org/x/sync/errgroup" ) +type GzipHelperFunc func(io.Reader) (io.ReadCloser, error) + type options struct { chunkSize int compressionLevel int @@ -50,6 +52,7 @@ type options struct { compression Compression ctx context.Context minChunkSize int + gzipHelperFunc GzipHelperFunc } type Option func(o *options) error @@ -127,6 +130,18 @@ func WithMinChunkSize(minChunkSize int) Option { } } +// WithGzipHelperFunc option specifies a custom function to decompress gzip-compressed layers. +// When a gzip-compressed layer is detected, this function will be used instead of the +// Go standard library gzip decompression for better performance. +// The function should take an io.Reader as input and return an io.ReadCloser. +// If nil, the Go standard library gzip.NewReader will be used. +func WithGzipHelperFunc(gzipHelperFunc GzipHelperFunc) Option { + return func(o *options) error { + o.gzipHelperFunc = gzipHelperFunc + return nil + } +} + // Blob is an eStargz blob. 
type Blob struct { io.ReadCloser @@ -186,7 +201,7 @@ func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { rErr = fmt.Errorf("error from context %q: %w", cErr, rErr) } }() - tarBlob, err := decompressBlob(tarBlob, layerFiles) + tarBlob, err := decompressBlob(tarBlob, layerFiles, opts.gzipHelperFunc) if err != nil { return nil, err } @@ -649,7 +664,7 @@ func (cr *countReadSeeker) currentPos() int64 { return *cr.cPos } -func decompressBlob(org *io.SectionReader, tmp *tempFiles) (*io.SectionReader, error) { +func decompressBlob(org *io.SectionReader, tmp *tempFiles, gzipHelperFunc GzipHelperFunc) (*io.SectionReader, error) { if org.Size() < 4 { return org, nil } @@ -660,7 +675,13 @@ func decompressBlob(org *io.SectionReader, tmp *tempFiles) (*io.SectionReader, e var dR io.Reader if bytes.Equal([]byte{0x1F, 0x8B, 0x08}, src[:3]) { // gzip - dgR, err := gzip.NewReader(io.NewSectionReader(org, 0, org.Size())) + var dgR io.ReadCloser + var err error + if gzipHelperFunc != nil { + dgR, err = gzipHelperFunc(io.NewSectionReader(org, 0, org.Size())) + } else { + dgR, err = gzip.NewReader(io.NewSectionReader(org, 0, org.Size())) + } if err != nil { return nil, err } diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go index f4d55465584..ff91a37add5 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go @@ -307,6 +307,15 @@ func (r *Reader) initFields() error { } } + if len(r.m) == 0 { + r.m[""] = &TOCEntry{ + Name: "", + Type: "dir", + Mode: 0755, + NumLink: 1, + } + } + return nil } diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go index a8dcdb868e6..ff165e090e3 100644 --- a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go @@ -38,7 +38,6 @@ import ( "reflect" "sort" "strings" - "testing" "time" "github.com/containerd/stargz-snapshotter/estargz/errorutil" @@ -49,16 +48,48 @@ import ( // TestingController is Compression with some helper methods necessary for testing. type TestingController interface { Compression - TestStreams(t *testing.T, b []byte, streams []int64) - DiffIDOf(*testing.T, []byte) string + TestStreams(t TestingT, b []byte, streams []int64) + DiffIDOf(TestingT, []byte) string String() string } +// TestingT is the minimal set of testing.T required to run the +// tests defined in CompressionTestSuite. This interface exists to prevent +// leaking the testing package from being exposed outside tests. +type TestingT interface { + Errorf(format string, args ...any) + FailNow() + Failed() bool + Fatal(args ...any) + Fatalf(format string, args ...any) + Logf(format string, args ...any) + Parallel() +} + +// Runner allows running subtests of TestingT. This exists instead of adding +// a Run method to TestingT interface because the Run implementation of +// testing.T would not satisfy the interface. +type Runner func(t TestingT, name string, fn func(t TestingT)) + +type TestRunner struct { + TestingT + Runner Runner +} + +func (r *TestRunner) Run(name string, run func(*TestRunner)) { + r.Runner(r.TestingT, name, func(t TestingT) { + run(&TestRunner{TestingT: t, Runner: r.Runner}) + }) +} + // CompressionTestSuite tests this pkg with controllers can build valid eStargz blobs and parse them. 
-func CompressionTestSuite(t *testing.T, controllers ...TestingControllerFactory) { - t.Run("testBuild", func(t *testing.T) { t.Parallel(); testBuild(t, controllers...) }) - t.Run("testDigestAndVerify", func(t *testing.T) { t.Parallel(); testDigestAndVerify(t, controllers...) }) - t.Run("testWriteAndOpen", func(t *testing.T) { t.Parallel(); testWriteAndOpen(t, controllers...) }) +func CompressionTestSuite(t *TestRunner, controllers ...TestingControllerFactory) { + t.Run("testBuild", func(t *TestRunner) { t.Parallel(); testBuild(t, controllers...) }) + t.Run("testDigestAndVerify", func(t *TestRunner) { + t.Parallel() + testDigestAndVerify(t, controllers...) + }) + t.Run("testWriteAndOpen", func(t *TestRunner) { t.Parallel(); testWriteAndOpen(t, controllers...) }) } type TestingControllerFactory func() TestingController @@ -79,7 +110,7 @@ var allowedPrefix = [4]string{"", "./", "/", "../"} // testBuild tests the resulting stargz blob built by this pkg has the same // contents as the normal stargz blob. -func testBuild(t *testing.T, controllers ...TestingControllerFactory) { +func testBuild(t *TestRunner, controllers ...TestingControllerFactory) { tests := []struct { name string chunkSize int @@ -165,7 +196,7 @@ func testBuild(t *testing.T, controllers ...TestingControllerFactory) { prefix := prefix for _, minChunkSize := range tt.minChunkSize { minChunkSize := minChunkSize - t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s,minChunkSize=%d", newCL(), prefix, srcCompression, srcTarFormat, minChunkSize), func(t *testing.T) { + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s,minChunkSize=%d", newCL(), prefix, srcCompression, srcTarFormat, minChunkSize), func(t *TestRunner) { tarBlob := buildTar(t, tt.in, prefix, srcTarFormat) // Test divideEntries() entries, err := sortEntries(tarBlob, nil, nil) // identical order @@ -265,7 +296,7 @@ func testBuild(t *testing.T, controllers ...TestingControllerFactory) { } } -func isSameTarGz(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool { +func isSameTarGz(t TestingT, cla TestingController, a []byte, clb TestingController, b []byte) bool { aGz, err := cla.Reader(bytes.NewReader(a)) if err != nil { t.Fatalf("failed to read A") @@ -325,7 +356,7 @@ func isSameTarGz(t *testing.T, cla TestingController, a []byte, clb TestingContr return true } -func isSameVersion(t *testing.T, cla TestingController, a []byte, clb TestingController, b []byte) bool { +func isSameVersion(t TestingT, cla TestingController, a []byte, clb TestingController, b []byte) bool { aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), cla) if err != nil { t.Fatalf("failed to parse A: %v", err) @@ -339,7 +370,7 @@ func isSameVersion(t *testing.T, cla TestingController, a []byte, clb TestingCon return aJTOC.Version == bJTOC.Version } -func isSameEntries(t *testing.T, a, b *Reader) bool { +func isSameEntries(t TestingT, a, b *Reader) bool { aroot, ok := a.Lookup("") if !ok { t.Fatalf("failed to get root of A") @@ -353,7 +384,7 @@ func isSameEntries(t *testing.T, a, b *Reader) bool { return contains(t, aEntry, bEntry) && contains(t, bEntry, aEntry) } -func compressBlob(t *testing.T, src *io.SectionReader, srcCompression int) *io.SectionReader { +func compressBlob(t TestingT, src *io.SectionReader, srcCompression int) *io.SectionReader { buf := new(bytes.Buffer) var w io.WriteCloser var err error @@ -387,7 +418,7 @@ type stargzEntry struct { // contains checks if all child entries 
in "b" are also contained in "a". // This function also checks if the files/chunks contain the same contents among "a" and "b". -func contains(t *testing.T, a, b stargzEntry) bool { +func contains(t TestingT, a, b stargzEntry) bool { ae, ar := a.e, a.r be, br := b.e, b.r t.Logf("Comparing: %q vs %q", ae.Name, be.Name) @@ -498,7 +529,7 @@ func equalEntry(a, b *TOCEntry) bool { a.Digest == b.Digest } -func readOffset(t *testing.T, r *io.SectionReader, offset int64, e stargzEntry) ([]byte, int64, bool) { +func readOffset(t TestingT, r *io.SectionReader, offset int64, e stargzEntry) ([]byte, int64, bool) { ce, ok := e.r.ChunkEntryForOffset(e.e.Name, offset) if !ok { return nil, 0, false @@ -517,7 +548,7 @@ func readOffset(t *testing.T, r *io.SectionReader, offset int64, e stargzEntry) return data[:n], offset + ce.ChunkSize, true } -func dumpTOCJSON(t *testing.T, tocJSON *JTOC) string { +func dumpTOCJSON(t TestingT, tocJSON *JTOC) string { jtocData, err := json.Marshal(*tocJSON) if err != nil { t.Fatalf("failed to marshal TOC JSON: %v", err) @@ -531,20 +562,19 @@ func dumpTOCJSON(t *testing.T, tocJSON *JTOC) string { const chunkSize = 3 -// type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, compressionLevel int) -type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) +type check func(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) // testDigestAndVerify runs specified checks against sample stargz blobs. -func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) { +func testDigestAndVerify(t *TestRunner, controllers ...TestingControllerFactory) { tests := []struct { name string - tarInit func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) + tarInit func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) checks []check minChunkSize []int }{ { name: "no-regfile", - tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + tarInit: func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) { return tarOf( dir("test/"), ) @@ -559,7 +589,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) }, { name: "small-files", - tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + tarInit: func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) { return tarOf( regDigest(t, "baz.txt", "", dgstMap), regDigest(t, "foo.txt", "a", dgstMap), @@ -583,7 +613,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) }, { name: "big-files", - tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + tarInit: func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) { return tarOf( regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap), regDigest(t, "foo.txt", "a", dgstMap), @@ -607,7 +637,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) { name: "with-non-regfiles", minChunkSize: []int{0, 64000}, - tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + tarInit: func(t TestingT, dgstMap map[string]digest.Digest) (blob []tarEntry) { return tarOf( regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap), regDigest(t, "foo.txt", "a", dgstMap), @@ -654,7 
+684,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) srcTarFormat := srcTarFormat for _, minChunkSize := range tt.minChunkSize { minChunkSize := minChunkSize - t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s,minChunkSize=%d", newCL(), prefix, srcTarFormat, minChunkSize), func(t *testing.T) { + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s,minChunkSize=%d", newCL(), prefix, srcTarFormat, minChunkSize), func(t *TestRunner) { // Get original tar file and chunk digests dgstMap := make(map[string]digest.Digest) tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat) @@ -690,7 +720,7 @@ func testDigestAndVerify(t *testing.T, controllers ...TestingControllerFactory) // checkStargzTOC checks the TOC JSON of the passed stargz has the expected // digest and contains valid chunks. It walks all entries in the stargz and // checks all chunk digests stored to the TOC JSON match the actual contents. -func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { +func checkStargzTOC(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { sgz, err := Open( io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), WithDecompressors(controller), @@ -801,7 +831,7 @@ func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstM // checkVerifyTOC checks the verification works for the TOC JSON of the passed // stargz. It walks all entries in the stargz and checks the verifications for // all chunks work. -func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { +func checkVerifyTOC(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { sgz, err := Open( io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), WithDecompressors(controller), @@ -882,9 +912,9 @@ func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstM // checkVerifyInvalidTOCEntryFail checks if misconfigured TOC JSON can be // detected during the verification and the verification returns an error. 
func checkVerifyInvalidTOCEntryFail(filename string) check { - return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { + return func(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { funcs := map[string]rewriteFunc{ - "lost digest in a entry": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) { + "lost digest in a entry": func(t TestingT, toc *JTOC, sgz *io.SectionReader) { var found bool for _, e := range toc.Entries { if cleanEntryName(e.Name) == filename { @@ -902,7 +932,7 @@ func checkVerifyInvalidTOCEntryFail(filename string) check { t.Fatalf("rewrite target not found") } }, - "duplicated entry offset": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) { + "duplicated entry offset": func(t TestingT, toc *JTOC, sgz *io.SectionReader) { var ( sampleEntry *TOCEntry targetEntry *TOCEntry @@ -929,7 +959,7 @@ func checkVerifyInvalidTOCEntryFail(filename string) check { } for name, rFunc := range funcs { - t.Run(name, func(t *testing.T) { + t.Run(name, func(t *TestRunner) { newSgz, newTocDigest := rewriteTOCJSON(t, io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), rFunc, controller) buf := new(bytes.Buffer) if _, err := io.Copy(buf, newSgz); err != nil { @@ -958,7 +988,7 @@ func checkVerifyInvalidTOCEntryFail(filename string) check { // checkVerifyInvalidStargzFail checks if the verification detects that the // given stargz file doesn't match to the expected digest and returns error. func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check { - return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { + return func(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { cl := newController() rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(cl)) if err != nil { @@ -990,7 +1020,7 @@ func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check { // checkVerifyBrokenContentFail checks if the verifier detects broken contents // that doesn't match to the expected digest and returns error. 
func checkVerifyBrokenContentFail(filename string) check { - return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { + return func(t *TestRunner, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController, newController TestingControllerFactory) { // Parse stargz file sgz, err := Open( io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), @@ -1047,9 +1077,9 @@ func chunkID(name string, offset, size int64) string { return fmt.Sprintf("%s-%d-%d", cleanEntryName(name), offset, size) } -type rewriteFunc func(t *testing.T, toc *JTOC, sgz *io.SectionReader) +type rewriteFunc func(t TestingT, toc *JTOC, sgz *io.SectionReader) -func rewriteTOCJSON(t *testing.T, sgz *io.SectionReader, rewrite rewriteFunc, controller TestingController) (newSgz io.Reader, tocDigest digest.Digest) { +func rewriteTOCJSON(t TestingT, sgz *io.SectionReader, rewrite rewriteFunc, controller TestingController) (newSgz io.Reader, tocDigest digest.Digest) { decodedJTOC, jtocOffset, err := parseStargz(sgz, controller) if err != nil { t.Fatalf("failed to extract TOC JSON: %v", err) @@ -1120,7 +1150,7 @@ func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJT return decodedJTOC, tocOffset, nil } -func testWriteAndOpen(t *testing.T, controllers ...TestingControllerFactory) { +func testWriteAndOpen(t *TestRunner, controllers ...TestingControllerFactory) { const content = "Some contents" invalidUtf8 := "\xff\xfe\xfd" @@ -1464,7 +1494,7 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingControllerFactory) { for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { srcTarFormat := srcTarFormat for _, lossless := range []bool{true, false} { - t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", newCL(), prefix, lossless, srcTarFormat), func(t *testing.T) { + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", newCL(), prefix, lossless, srcTarFormat), func(t *TestRunner) { var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat) origTarDgstr := digest.Canonical.Digester() tr = io.TeeReader(tr, origTarDgstr.Hash()) @@ -1530,6 +1560,9 @@ func testWriteAndOpen(t *testing.T, controllers ...TestingControllerFactory) { if err != nil { t.Fatalf("stargz.Open: %v", err) } + if _, ok := r.Lookup(""); !ok { + t.Fatalf("failed to lookup rootdir: %v", err) + } wantTOCVersion := 1 if tt.wantTOCVersion > 0 { wantTOCVersion = tt.wantTOCVersion @@ -1628,7 +1661,7 @@ func digestFor(content string) string { type numTOCEntries int -func (n numTOCEntries) check(t *testing.T, r *Reader) { +func (n numTOCEntries) check(t TestingT, r *Reader) { if r.toc == nil { t.Fatal("nil TOC") } @@ -1648,15 +1681,15 @@ func (n numTOCEntries) check(t *testing.T, r *Reader) { func checks(s ...stargzCheck) []stargzCheck { return s } type stargzCheck interface { - check(t *testing.T, r *Reader) + check(t TestingT, r *Reader) } -type stargzCheckFn func(*testing.T, *Reader) +type stargzCheckFn func(TestingT, *Reader) -func (f stargzCheckFn) check(t *testing.T, r *Reader) { f(t, r) } +func (f stargzCheckFn) check(t TestingT, r *Reader) { f(t, r) } func maxDepth(max int) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { e, ok := r.Lookup("") if !ok { t.Fatal("root directory not found") @@ -1673,7 +1706,7 @@ 
func maxDepth(max int) stargzCheck { }) } -func getMaxDepth(t *testing.T, e *TOCEntry, current, limit int) (max int, rErr error) { +func getMaxDepth(t TestingT, e *TOCEntry, current, limit int) (max int, rErr error) { if current > limit { return -1, fmt.Errorf("walkMaxDepth: exceeds limit: current:%d > limit:%d", current, limit) @@ -1695,7 +1728,7 @@ func getMaxDepth(t *testing.T, e *TOCEntry, current, limit int) (max int, rErr e } func hasFileLen(file string, wantLen int) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { for _, ent := range r.toc.Entries { if ent.Name == file { if ent.Type != "reg" { @@ -1711,7 +1744,7 @@ func hasFileLen(file string, wantLen int) stargzCheck { } func hasFileXattrs(file, name, value string) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { for _, ent := range r.toc.Entries { if ent.Name == file { if ent.Type != "reg" { @@ -1738,7 +1771,7 @@ func hasFileXattrs(file, name, value string) stargzCheck { } func hasFileDigest(file string, digest string) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { ent, ok := r.Lookup(file) if !ok { t.Fatalf("didn't find TOCEntry for file %q", file) @@ -1750,7 +1783,7 @@ func hasFileDigest(file string, digest string) stargzCheck { } func hasFileContentsWithPreRead(file string, offset int, want string, extra ...chunkInfo) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { extraMap := make(map[string]chunkInfo) for _, e := range extra { extraMap[e.name] = e @@ -1797,7 +1830,7 @@ func hasFileContentsWithPreRead(file string, offset int, want string, extra ...c } func hasFileContentsRange(file string, offset int, want string) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { f, err := r.OpenFile(file) if err != nil { t.Fatal(err) @@ -1814,7 +1847,7 @@ func hasFileContentsRange(file string, offset int, want string) stargzCheck { } func hasChunkEntries(file string, wantChunks int) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { ent, ok := r.Lookup(file) if !ok { t.Fatalf("no file for %q", file) @@ -1858,7 +1891,7 @@ func hasChunkEntries(file string, wantChunks int) stargzCheck { } func entryHasChildren(dir string, want ...string) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { want := append([]string(nil), want...) 
var got []string ent, ok := r.Lookup(dir) @@ -1877,7 +1910,7 @@ func entryHasChildren(dir string, want ...string) stargzCheck { } func hasDir(file string) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { for _, ent := range r.toc.Entries { if ent.Name == cleanEntryName(file) { if ent.Type != "dir" { @@ -1891,7 +1924,7 @@ func hasDir(file string) stargzCheck { } func hasDirLinkCount(file string, count int) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { for _, ent := range r.toc.Entries { if ent.Name == cleanEntryName(file) { if ent.Type != "dir" { @@ -1909,7 +1942,7 @@ func hasDirLinkCount(file string, count int) stargzCheck { } func hasMode(file string, mode os.FileMode) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { for _, ent := range r.toc.Entries { if ent.Name == cleanEntryName(file) { if ent.Stat().Mode() != mode { @@ -1924,7 +1957,7 @@ func hasMode(file string, mode os.FileMode) stargzCheck { } func hasSymlink(file, target string) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { for _, ent := range r.toc.Entries { if ent.Name == file { if ent.Type != "symlink" { @@ -1940,7 +1973,7 @@ func hasSymlink(file, target string) stargzCheck { } func lookupMatch(name string, want *TOCEntry) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { e, ok := r.Lookup(name) if !ok { t.Fatalf("failed to Lookup entry %q", name) @@ -1953,7 +1986,7 @@ func lookupMatch(name string, want *TOCEntry) stargzCheck { } func hasEntryOwner(entry string, owner owner) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { ent, ok := r.Lookup(strings.TrimSuffix(entry, "/")) if !ok { t.Errorf("entry %q not found", entry) @@ -1967,7 +2000,7 @@ func hasEntryOwner(entry string, owner owner) stargzCheck { } func mustSameEntry(files ...string) stargzCheck { - return stargzCheckFn(func(t *testing.T, r *Reader) { + return stargzCheckFn(func(t TestingT, r *Reader) { var first *TOCEntry for _, f := range files { if first == nil { @@ -2039,7 +2072,7 @@ func (f tarEntryFunc) appendTar(tw *tar.Writer, prefix string, format tar.Format return f(tw, prefix, format) } -func buildTar(t *testing.T, ents []tarEntry, prefix string, opts ...interface{}) *io.SectionReader { +func buildTar(t TestingT, ents []tarEntry, prefix string, opts ...interface{}) *io.SectionReader { format := tar.FormatUnknown for _, opt := range opts { switch v := opt.(type) { @@ -2248,7 +2281,7 @@ func noPrefetchLandmark() tarEntry { }) } -func regDigest(t *testing.T, name string, contentStr string, digestMap map[string]digest.Digest) tarEntry { +func regDigest(t TestingT, name string, contentStr string, digestMap map[string]digest.Digest) tarEntry { if digestMap == nil { t.Fatalf("digest map mustn't be nil") } @@ -2318,7 +2351,7 @@ func (f fileInfoOnlyMode) ModTime() time.Time { return time.Now() } func (f fileInfoOnlyMode) IsDir() bool { return os.FileMode(f).IsDir() } func (f fileInfoOnlyMode) Sys() interface{} { return nil } -func CheckGzipHasStreams(t *testing.T, b []byte, streams []int64) { +func CheckGzipHasStreams(t TestingT, b []byte, streams []int64) { if len(streams) == 0 { return // nop } @@ -2356,7 +2389,7 @@ func 
CheckGzipHasStreams(t *testing.T, b []byte, streams []int64) { } } -func GzipDiffIDOf(t *testing.T, b []byte) string { +func GzipDiffIDOf(t TestingT, b []byte) string { h := sha256.New() zr, err := gzip.NewReader(bytes.NewReader(b)) if err != nil { diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go index af53fb860cc..4e92f5998a8 100644 --- a/vendor/github.com/klauspost/compress/flate/deflate.go +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -6,11 +6,12 @@ package flate import ( - "encoding/binary" "errors" "fmt" "io" "math" + + "github.com/klauspost/compress/internal/le" ) const ( @@ -234,12 +235,9 @@ func (d *compressor) fillWindow(b []byte) { // Calculate 256 hashes at the time (more L1 cache hits) loops := (n + 256 - minMatchLength) / 256 - for j := 0; j < loops; j++ { + for j := range loops { startindex := j * 256 - end := startindex + 256 + minMatchLength - 1 - if end > n { - end = n - } + end := min(startindex+256+minMatchLength-1, n) tocheck := d.window[startindex:end] dstSize := len(tocheck) - minMatchLength + 1 @@ -269,18 +267,12 @@ func (d *compressor) fillWindow(b []byte) { // We only look at chainCount possibilities before giving up. // pos = s.index, prevHead = s.chainHead-s.hashOffset, prevLength=minMatchLength-1, lookahead func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, offset int, ok bool) { - minMatchLook := maxMatchLength - if lookahead < minMatchLook { - minMatchLook = lookahead - } + minMatchLook := min(lookahead, maxMatchLength) win := d.window[0 : pos+minMatchLook] // We quit when we get a match that's at least nice long - nice := len(win) - pos - if d.nice < nice { - nice = d.nice - } + nice := min(d.nice, len(win)-pos) // If we've got a match that's good enough, only look in 1/4 the chain. tries := d.chain @@ -288,10 +280,7 @@ func (d *compressor) findMatch(pos int, prevHead int, lookahead int) (length, of wEnd := win[pos+length] wPos := win[pos:] - minIndex := pos - windowSize - if minIndex < 0 { - minIndex = 0 - } + minIndex := max(pos-windowSize, 0) offset = 0 if d.chain < 100 { @@ -374,7 +363,7 @@ func (d *compressor) writeStoredBlock(buf []byte) error { // of the supplied slice. // The caller must ensure that len(b) >= 4. func hash4(b []byte) uint32 { - return hash4u(binary.LittleEndian.Uint32(b), hashBits) + return hash4u(le.Load32(b, 0), hashBits) } // hash4 returns the hash of u to fit in a hash table with h bits. @@ -389,7 +378,7 @@ func bulkHash4(b []byte, dst []uint32) { if len(b) < 4 { return } - hb := binary.LittleEndian.Uint32(b) + hb := le.Load32(b, 0) dst[0] = hash4u(hb, hashBits) end := len(b) - 4 + 1 @@ -480,10 +469,7 @@ func (d *compressor) deflateLazy() { prevOffset := s.offset s.length = minMatchLength - 1 s.offset = 0 - minIndex := s.index - windowSize - if minIndex < 0 { - minIndex = 0 - } + minIndex := max(s.index-windowSize, 0) if s.chainHead-s.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy { if newLength, newOffset, ok := d.findMatch(s.index, s.chainHead-s.hashOffset, lookahead); ok { @@ -503,10 +489,7 @@ func (d *compressor) deflateLazy() { if prevLength < maxMatchLength-checkOff { prevIndex := s.index - 1 if prevIndex+prevLength < s.maxInsertIndex { - end := lookahead - if lookahead > maxMatchLength+checkOff { - end = maxMatchLength + checkOff - } + end := min(lookahead, maxMatchLength+checkOff) end += prevIndex // Hash at match end. 
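The deflate.go hunks above, and many of the vendored hunks that follow, are mechanical Go 1.21 modernizations: each manual clamp sequence collapses into the min (or max) builtin. A minimal runnable sketch of the rewrite, with illustrative values:

package main

import "fmt"

func main() {
	n, start := 100, 90
	// Pre-Go-1.21 pattern, as in the removed lines:
	end := start + 32
	if end > n {
		end = n
	}
	// Equivalent builtin form, as in the added lines:
	end2 := min(start+32, n)
	fmt.Println(end, end2) // 100 100
}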
@@ -603,15 +586,9 @@ func (d *compressor) deflateLazy() { // table. newIndex := s.index + prevLength - 1 // Calculate missing hashes - end := newIndex - if end > s.maxInsertIndex { - end = s.maxInsertIndex - } + end := min(newIndex, s.maxInsertIndex) end += minMatchLength - 1 - startindex := s.index + 1 - if startindex > s.maxInsertIndex { - startindex = s.maxInsertIndex - } + startindex := min(s.index+1, s.maxInsertIndex) tocheck := d.window[startindex:end] dstSize := len(tocheck) - minMatchLength + 1 if dstSize > 0 { diff --git a/vendor/github.com/klauspost/compress/flate/dict_decoder.go b/vendor/github.com/klauspost/compress/flate/dict_decoder.go index bb36351a5af..cb855abc4ba 100644 --- a/vendor/github.com/klauspost/compress/flate/dict_decoder.go +++ b/vendor/github.com/klauspost/compress/flate/dict_decoder.go @@ -104,10 +104,7 @@ func (dd *dictDecoder) writeCopy(dist, length int) int { dstBase := dd.wrPos dstPos := dstBase srcPos := dstPos - dist - endPos := dstPos + length - if endPos > len(dd.hist) { - endPos = len(dd.hist) - } + endPos := min(dstPos+length, len(dd.hist)) // Copy non-overlapping section after destination position. // diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go index 0e8b1630c04..791c9dcbfd9 100644 --- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go +++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go @@ -7,7 +7,6 @@ package flate import ( "fmt" - "math/bits" "github.com/klauspost/compress/internal/le" ) @@ -151,29 +150,9 @@ func (e *fastGen) matchlen(s, t int, src []byte) int32 { panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) } } - s1 := min(s+maxMatchLength-4, len(src)) - left := s1 - s - n := int32(0) - for left >= 8 { - diff := le.Load64(src, s) ^ le.Load64(src, t) - if diff != 0 { - return n + int32(bits.TrailingZeros64(diff)>>3) - } - s += 8 - t += 8 - n += 8 - left -= 8 - } - - a := src[s:s1] + a := src[s:min(s+maxMatchLength-4, len(src))] b := src[t:] - for i := range a { - if a[i] != b[i] { - break - } - n++ - } - return n + return int32(matchLen(a, b)) } // matchlenLong will return the match length between offsets and t in src. @@ -193,29 +172,7 @@ func (e *fastGen) matchlenLong(s, t int, src []byte) int32 { panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) } } - // Extend the match to be as long as possible. - left := len(src) - s - n := int32(0) - for left >= 8 { - diff := le.Load64(src, s) ^ le.Load64(src, t) - if diff != 0 { - return n + int32(bits.TrailingZeros64(diff)>>3) - } - s += 8 - t += 8 - n += 8 - left -= 8 - } - - a := src[s:] - b := src[t:] - for i := range a { - if a[i] != b[i] { - break - } - n++ - } - return n + return int32(matchLen(src[s:], src[t:])) } // Reset the encoding table. 
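Both matchlen variants in fast_encoder.go now delegate to the package's shared matchLen helper instead of inlining the comparison loop. The removed lines show the underlying technique; here is a self-contained sketch of it (the helper's exact shape is an assumption, not the library's code):

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// matchLen counts the leading bytes shared by a and b, comparing
// eight bytes at a time; when a 64-bit XOR of the two words is
// nonzero, its trailing zero count locates the first differing byte.
func matchLen(a, b []byte) int {
	n := 0
	for len(a) >= 8 && len(b) >= 8 {
		if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
			return n + bits.TrailingZeros64(diff)>>3
		}
		a, b = a[8:], b[8:]
		n += 8
	}
	// Tail: compare the remaining bytes one at a time.
	for i := 0; i < len(a) && i < len(b); i++ {
		if a[i] != b[i] {
			break
		}
		n++
	}
	return n
}

func main() {
	fmt.Println(matchLen([]byte("gopher gopher!"), []byte("gopher gophers"))) // 13
}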
diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go index afdc8c053a0..03a1796979b 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -211,7 +211,9 @@ func (w *huffmanBitWriter) flush() { n++ } w.bits = 0 - w.write(w.bytes[:n]) + if n > 0 { + w.write(w.bytes[:n]) + } w.nbytes = 0 } @@ -303,10 +305,7 @@ func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litE w.codegenFreq[size]++ count-- for count >= 3 { - n := 6 - if n > count { - n = count - } + n := min(6, count) codegen[outIndex] = 16 outIndex++ codegen[outIndex] = uint8(n - 3) @@ -316,10 +315,7 @@ func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litE } } else { for count >= 11 { - n := 138 - if n > count { - n = count - } + n := min(138, count) codegen[outIndex] = 18 outIndex++ codegen[outIndex] = uint8(n - 11) @@ -438,8 +434,8 @@ func (w *huffmanBitWriter) writeOutBits() { w.nbits -= 48 n := w.nbytes - // We over-write, but faster... - le.Store64(w.bytes[n:], bits) + // We overwrite, but faster... + le.Store64(w.bytes[:], n, bits) n += 6 if n >= bufferFlushSize { @@ -472,7 +468,7 @@ func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, n w.writeBits(int32(numOffsets-1), 5) w.writeBits(int32(numCodegens-4), 4) - for i := 0; i < numCodegens; i++ { + for i := range numCodegens { value := uint(w.codegenEncoding.codes[codegenOrder[i]].len()) w.writeBits(int32(value), 3) } @@ -650,7 +646,7 @@ func (w *huffmanBitWriter) writeBlockDynamic(tokens *tokens, eof bool, input []b w.lastHeader = 0 } - numLiterals, numOffsets := w.indexTokens(tokens, !sync) + numLiterals, numOffsets := w.indexTokens(tokens, fillReuse && !sync) extraBits := 0 ssize, storable := w.storedSize(input) @@ -855,8 +851,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits |= c.code64() << (nbits & 63) nbits += c.len() if nbits >= 48 { - le.Store64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + le.Store64(w.bytes[:], nbytes, bits) bits >>= 48 nbits -= 48 nbytes += 6 @@ -883,8 +878,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits |= c.code64() << (nbits & 63) nbits += c.len() if nbits >= 48 { - le.Store64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + le.Store64(w.bytes[:], nbytes, bits) bits >>= 48 nbits -= 48 nbytes += 6 @@ -906,8 +900,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits |= uint64(extraLength) << (nbits & 63) nbits += extraLengthBits if nbits >= 48 { - le.Store64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + le.Store64(w.bytes[:], nbytes, bits) bits >>= 48 nbits -= 48 nbytes += 6 @@ -932,8 +925,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits |= c.code64() << (nbits & 63) nbits += c.len() if nbits >= 48 { - le.Store64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + le.Store64(w.bytes[:], nbytes, bits) bits >>= 48 nbits -= 48 nbytes += 6 @@ -954,8 +946,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63) nbits += uint8(offsetComb) if nbits >= 48 { - le.Store64(w.bytes[nbytes:], bits) - 
//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + le.Store64(w.bytes[:], nbytes, bits) bits >>= 48 nbits -= 48 nbytes += 6 @@ -1108,7 +1099,7 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { // We must have at least 48 bits free. if nbits >= 8 { n := nbits >> 3 - le.Store64(w.bytes[nbytes:], bits) + le.Store64(w.bytes[:], nbytes, bits) bits >>= (n * 8) & 63 nbits -= n * 8 nbytes += n @@ -1137,8 +1128,7 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { // Remaining... for _, t := range input { if nbits >= 48 { - le.Store64(w.bytes[nbytes:], bits) - //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits + le.Store64(w.bytes[:], nbytes, bits) bits >>= 48 nbits -= 48 nbytes += 6 diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go index be7b58b473f..5f901bd0fe8 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_code.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go @@ -91,7 +91,7 @@ func generateFixedLiteralEncoding() *huffmanEncoder { h := newHuffmanEncoder(literalCount) codes := h.codes var ch uint16 - for ch = 0; ch < literalCount; ch++ { + for ch = range uint16(literalCount) { var bits uint16 var size uint8 switch { diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go index 0d7b437f1c6..6e90126db03 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate.go +++ b/vendor/github.com/klauspost/compress/flate/inflate.go @@ -485,7 +485,7 @@ func (f *decompressor) readHuffman() error { f.nb -= 5 + 5 + 4 // (HCLEN+4)*3 bits: code lengths in the magic codeOrder order. - for i := 0; i < nclen; i++ { + for i := range nclen { for f.nb < 3 { if err := f.moreBits(); err != nil { return err @@ -776,7 +776,7 @@ func fixedHuffmanDecoderInit() { fixedOnce.Do(func() { // These come from the RFC section 3.2.6. var bits [288]int - for i := 0; i < 144; i++ { + for i := range 144 { bits[i] = 8 } for i := 144; i < 256; i++ { diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go index 6e5c21502f7..a22ad7d1255 100644 --- a/vendor/github.com/klauspost/compress/flate/level5.go +++ b/vendor/github.com/klauspost/compress/flate/level5.go @@ -677,10 +677,7 @@ func (e *fastEncL5Window) matchlen(s, t int32, src []byte) int32 { panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) } } - s1 := int(s) + maxMatchLength - 4 - if s1 > len(src) { - s1 = len(src) - } + s1 := min(int(s)+maxMatchLength-4, len(src)) // Extend the match to be as long as possible. return int32(matchLen(src[s:s1], src[t:])) diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go index 13b9b100dbc..90b74f7acdd 100644 --- a/vendor/github.com/klauspost/compress/flate/stateless.go +++ b/vendor/github.com/klauspost/compress/flate/stateless.go @@ -56,7 +56,7 @@ func NewStatelessWriter(dst io.Writer) io.WriteCloser { // bitWriterPool contains bit writers that can be reused. 
var bitWriterPool = sync.Pool{ - New: func() interface{} { + New: func() any { return newHuffmanBitWriter(nil) }, } @@ -184,7 +184,7 @@ func statelessEnc(dst *tokens, src []byte, startAt int16) { // Index until startAt if startAt > 0 { cv := load3232(src, 0) - for i := int16(0); i < startAt; i++ { + for i := range startAt { table[hashSL(cv)] = tableEntry{offset: i} cv = (cv >> 8) | (uint32(src[i+4]) << 24) } diff --git a/vendor/github.com/klauspost/compress/fse/bitwriter.go b/vendor/github.com/klauspost/compress/fse/bitwriter.go index e82fa3bb7b6..d58b3fe4237 100644 --- a/vendor/github.com/klauspost/compress/fse/bitwriter.go +++ b/vendor/github.com/klauspost/compress/fse/bitwriter.go @@ -143,7 +143,7 @@ func (b *bitWriter) flush32() { // flushAlign will flush remaining full bytes and align to next byte boundary. func (b *bitWriter) flushAlign() { nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { + for i := range nbBytes { b.out = append(b.out, byte(b.bitContainer>>(i*8))) } b.nBits = 0 diff --git a/vendor/github.com/klauspost/compress/fse/compress.go b/vendor/github.com/klauspost/compress/fse/compress.go index 074018d8f94..8c8baa4fc2c 100644 --- a/vendor/github.com/klauspost/compress/fse/compress.go +++ b/vendor/github.com/klauspost/compress/fse/compress.go @@ -396,7 +396,7 @@ func (s *Scratch) buildCTable() error { if v > largeLimit { s.zeroBits = true } - for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + for range v { tableSymbol[position] = symbol position = (position + step) & tableMask for position > highThreshold { diff --git a/vendor/github.com/klauspost/compress/huff0/bitwriter.go b/vendor/github.com/klauspost/compress/huff0/bitwriter.go index 0ebc9aaac76..41db94cded0 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitwriter.go +++ b/vendor/github.com/klauspost/compress/huff0/bitwriter.go @@ -85,7 +85,7 @@ func (b *bitWriter) flush32() { // flushAlign will flush remaining full bytes and align to next byte boundary. func (b *bitWriter) flushAlign() { nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { + for i := range nbBytes { b.out = append(b.out, byte(b.bitContainer>>(i*8))) } b.nBits = 0 diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go index 84aa3d12f00..a97cf1b5d35 100644 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -276,7 +276,7 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) { offsetIdx := len(s.Out) s.Out = append(s.Out, sixZeros[:]...) 
- for i := 0; i < 4; i++ { + for i := range 4 { toDo := src if len(toDo) > segmentSize { toDo = toDo[:segmentSize] @@ -312,7 +312,7 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { segmentSize := (len(src) + 3) / 4 var wg sync.WaitGroup wg.Add(4) - for i := 0; i < 4; i++ { + for i := range 4 { toDo := src if len(toDo) > segmentSize { toDo = toDo[:segmentSize] @@ -326,7 +326,7 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { }(i) } wg.Wait() - for i := 0; i < 4; i++ { + for i := range 4 { o := s.tmpOut[i] if len(o) > math.MaxUint16 { // We cannot store the size in the jump table diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 0f56b02d747..7d0efa8818a 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -626,7 +626,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { var br [4]bitReaderBytes start := 6 - for i := 0; i < 3; i++ { + for i := range 3 { length := int(src[i*2]) | (int(src[i*2+1]) << 8) if start+length >= len(src) { return nil, errors.New("truncated input (or invalid offset)") @@ -798,10 +798,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } + endsAt := min(offset+remainBytes, len(out)) br := &br[i] bitsLeft := br.remaining() for bitsLeft > 0 { @@ -864,7 +861,7 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { var br [4]bitReaderBytes start := 6 - for i := 0; i < 3; i++ { + for i := range 3 { length := int(src[i*2]) | (int(src[i*2+1]) << 8) if start+length >= len(src) { return nil, errors.New("truncated input (or invalid offset)") @@ -1035,10 +1032,7 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } + endsAt := min(offset+remainBytes, len(out)) br := &br[i] bitsLeft := br.remaining() for bitsLeft > 0 { diff --git a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go index ba7e8e6b027..99ddd4af97c 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress_amd64.go @@ -58,7 +58,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { var br [4]bitReaderShifted // Decode "jump table" start := 6 - for i := 0; i < 3; i++ { + for i := range 3 { length := int(src[i*2]) | (int(src[i*2+1]) << 8) if start+length >= len(src) { return nil, errors.New("truncated input (or invalid offset)") @@ -109,10 +109,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i - endsAt := offset + remainBytes - if endsAt > len(out) { - endsAt = len(out) - } + endsAt := min(offset+remainBytes, len(out)) br := &br[i] bitsLeft := br.remaining() for bitsLeft > 0 { diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go index 77ecd68e0a7..67d9e05b6cc 100644 --- 
a/vendor/github.com/klauspost/compress/huff0/huff0.go +++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -201,7 +201,7 @@ func (c cTable) write(s *Scratch) error { for i := range hist[:16] { hist[i] = 0 } - for n := uint8(0); n < maxSymbolValue; n++ { + for n := range maxSymbolValue { v := bitsToWeight[c[n].nBits] & 15 huffWeight[n] = v hist[v]++ @@ -271,7 +271,7 @@ func (c cTable) estTableSize(s *Scratch) (sz int, err error) { for i := range hist[:16] { hist[i] = 0 } - for n := uint8(0); n < maxSymbolValue; n++ { + for n := range maxSymbolValue { v := bitsToWeight[c[n].nBits] & 15 huffWeight[n] = v hist[v]++ diff --git a/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go b/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go index 0cfb5c0e278..4f2a0d8c58d 100644 --- a/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go +++ b/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go @@ -37,6 +37,6 @@ func Store32(b []byte, v uint32) { } // Store64 will store v at b. -func Store64(b []byte, v uint64) { - binary.LittleEndian.PutUint64(b, v) +func Store64[I Indexer](b []byte, i I, v uint64) { + binary.LittleEndian.PutUint64(b[i:], v) } diff --git a/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go b/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go index ada45cd909e..218a38bc4a5 100644 --- a/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go +++ b/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go @@ -38,18 +38,15 @@ func Load64[I Indexer](b []byte, i I) uint64 { // Store16 will store v at b. func Store16(b []byte, v uint16) { - //binary.LittleEndian.PutUint16(b, v) *(*uint16)(unsafe.Pointer(unsafe.SliceData(b))) = v } // Store32 will store v at b. func Store32(b []byte, v uint32) { - //binary.LittleEndian.PutUint32(b, v) *(*uint32)(unsafe.Pointer(unsafe.SliceData(b))) = v } -// Store64 will store v at b. -func Store64(b []byte, v uint64) { - //binary.LittleEndian.PutUint64(b, v) - *(*uint64)(unsafe.Pointer(unsafe.SliceData(b))) = v +// Store64 will store v at b[i:]. +func Store64[I Indexer](b []byte, i I, v uint64) { + *(*uint64)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i)) = v } diff --git a/vendor/github.com/klauspost/compress/internal/snapref/decode.go b/vendor/github.com/klauspost/compress/internal/snapref/decode.go index 40796a49d65..a2c82fcd226 100644 --- a/vendor/github.com/klauspost/compress/internal/snapref/decode.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/decode.go @@ -209,7 +209,7 @@ func (r *Reader) fill() error { if !r.readFull(r.buf[:len(magicBody)], false) { return r.err } - for i := 0; i < len(magicBody); i++ { + for i := range len(magicBody) { if r.buf[i] != magicBody[i] { r.err = ErrCorrupt return r.err diff --git a/vendor/github.com/klauspost/compress/internal/snapref/encode.go b/vendor/github.com/klauspost/compress/internal/snapref/encode.go index 13c6040a5de..860a994167a 100644 --- a/vendor/github.com/klauspost/compress/internal/snapref/encode.go +++ b/vendor/github.com/klauspost/compress/internal/snapref/encode.go @@ -20,8 +20,10 @@ import ( func Encode(dst, src []byte) []byte { if n := MaxEncodedLen(len(src)); n < 0 { panic(ErrTooLarge) - } else if len(dst) < n { + } else if cap(dst) < n { dst = make([]byte, n) + } else { + dst = dst[:n] } // The block starts with the varint-encoded length of the decompressed bytes. 
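The snapref Encode change just above is a small correctness and reuse fix: checking cap(dst) instead of len(dst) lets a short destination slice with spare capacity be resliced in place rather than reallocated, and the added else branch guarantees dst leaves the block with length n in every case. A sketch of the idiom; ensureLen is a hypothetical helper, not part of snapref:

package main

import "fmt"

// ensureLen returns a slice of length n, reusing b's backing array when its
// capacity suffices and allocating a new one only when it does not.
func ensureLen(b []byte, n int) []byte {
	if cap(b) < n {
		return make([]byte, n)
	}
	return b[:n] // reslice in place: no allocation
}

func main() {
	b := make([]byte, 2, 16)
	b = ensureLen(b, 10)
	fmt.Println(len(b), cap(b)) // 10 16
}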
diff --git a/vendor/github.com/klauspost/compress/zstd/bitwriter.go b/vendor/github.com/klauspost/compress/zstd/bitwriter.go index 1952f175b0d..b22b297e62a 100644 --- a/vendor/github.com/klauspost/compress/zstd/bitwriter.go +++ b/vendor/github.com/klauspost/compress/zstd/bitwriter.go @@ -88,7 +88,7 @@ func (b *bitWriter) flush32() { // flushAlign will flush remaining full bytes and align to next byte boundary. func (b *bitWriter) flushAlign() { nbBytes := (b.nBits + 7) >> 3 - for i := uint8(0); i < nbBytes; i++ { + for i := range nbBytes { b.out = append(b.out, byte(b.bitContainer>>(i*8))) } b.nBits = 0 diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 0dd742fd2a6..2329e996f86 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -54,11 +54,11 @@ const ( ) var ( - huffDecoderPool = sync.Pool{New: func() interface{} { + huffDecoderPool = sync.Pool{New: func() any { return &huff0.Scratch{} }} - fseDecoderPool = sync.Pool{New: func() interface{} { + fseDecoderPool = sync.Pool{New: func() any { return &fseDecoder{} }} ) @@ -553,7 +553,7 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { if compMode&3 != 0 { return errors.New("corrupt block: reserved bits not zero") } - for i := uint(0); i < 3; i++ { + for i := range uint(3) { mode := seqCompMode((compMode >> (6 - i*2)) & 3) if debugDecoder { println("Table", tableIndex(i), "is", mode) diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index ea2a19376c1..30df5513d56 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -373,11 +373,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { if cap(dst) == 0 && !d.o.limitToCap { // Allocate len(input) * 2 by default if nothing is provided // and we didn't get frame content size. - size := len(input) * 2 - // Cap to 1 MB. - if size > 1<<20 { - size = 1 << 20 - } + size := min( + // Cap to 1 MB. + len(input)*2, 1<<20) if uint64(size) > d.o.maxDecodedSize { size = int(d.o.maxDecodedSize) } diff --git a/vendor/github.com/klauspost/compress/zstd/dict.go b/vendor/github.com/klauspost/compress/zstd/dict.go index b7b83164bc7..2ffbfdf379e 100644 --- a/vendor/github.com/klauspost/compress/zstd/dict.go +++ b/vendor/github.com/klauspost/compress/zstd/dict.go @@ -194,17 +194,17 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { hist := o.History contents := o.Contents debug := o.DebugOut != nil - println := func(args ...interface{}) { + println := func(args ...any) { if o.DebugOut != nil { fmt.Fprintln(o.DebugOut, args...) } } - printf := func(s string, args ...interface{}) { + printf := func(s string, args ...any) { if o.DebugOut != nil { fmt.Fprintf(o.DebugOut, s, args...) } } - print := func(args ...interface{}) { + print := func(args ...any) { if o.DebugOut != nil { fmt.Fprint(o.DebugOut, args...) 
} @@ -424,16 +424,10 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { } // Literal table - avgSize := litTotal - if avgSize > huff0.BlockSizeMax/2 { - avgSize = huff0.BlockSizeMax / 2 - } + avgSize := min(litTotal, huff0.BlockSizeMax/2) huffBuff := make([]byte, 0, avgSize) // Target size - div := litTotal / avgSize - if div < 1 { - div = 1 - } + div := max(litTotal/avgSize, 1) if debug { println("Huffman weights:") } @@ -454,7 +448,7 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { huffBuff = append(huffBuff, 255) } scratch := &huff0.Scratch{TableLog: 11} - for tries := 0; tries < 255; tries++ { + for tries := range 255 { scratch = &huff0.Scratch{TableLog: 11} _, _, err = huff0.Compress1X(huffBuff, scratch) if err == nil { @@ -471,7 +465,7 @@ func BuildDict(o BuildDictOptions) ([]byte, error) { // Bail out.... Just generate something huffBuff = append(huffBuff, bytes.Repeat([]byte{255}, 10000)...) - for i := 0; i < 128; i++ { + for i := range 128 { huffBuff = append(huffBuff, byte(i)) } continue diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go index 7d250c67f59..c1192ec38f4 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -8,7 +8,7 @@ import ( ) const ( - dictShardBits = 6 + dictShardBits = 7 ) type fastBase struct { @@ -41,11 +41,9 @@ func (e *fastBase) AppendCRC(dst []byte) []byte { // or a window size small enough to contain the input size, if > 0. func (e *fastBase) WindowSize(size int64) int32 { if size > 0 && size < int64(e.maxMatchOff) { - b := int32(1) << uint(bits.Len(uint(size))) - // Keep minimum window. - if b < 1024 { - b = 1024 - } + b := max( + // Keep minimum window. + int32(1)< tMin && s > nextEmit && src[offset-1] == src[s-1] && l < maxMatchLength { s-- offset-- @@ -382,10 +377,7 @@ encodeLoop: nextEmit = s // Index skipped... - end := s - if s > sLimit+4 { - end = sLimit + 4 - } + end := min(s, sLimit+4) off := index0 + e.cur for index0 < end { cv0 := load6432(src, index0) @@ -444,10 +436,7 @@ encodeLoop: nextEmit = s // Index old s + 1 -> s - 1 or sLimit - end := s - if s > sLimit-4 { - end = sLimit - 4 - } + end := min(s, sLimit-4) off := index0 + e.cur for index0 < end { diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index 84a79fde767..85dcd28c32e 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -190,10 +190,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -252,10 +249,7 @@ encodeLoop: // and have to do special offset treatment. 
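The clamp rewrites running through these zstd encoders use the min and max builtins added in Go 1.21: a four-line if block collapses to a single expression with the same result. Sketched with made-up values:

package main

import "fmt"

func main() {
	s, maxMatchOff := int32(100), int32(4096)
	// Old form:
	//   tMin := s - maxMatchOff
	//   if tMin < 0 { tMin = 0 }
	tMin := max(s-maxMatchOff, 0) // new form, identical result
	fmt.Println(tMin)             // 0

	windowSize, maxBlockSize := 1<<16, 1<<17
	fmt.Println(min(windowSize, maxBlockSize)) // 65536
}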
startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -480,10 +474,7 @@ encodeLoop: l := matched // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- @@ -719,10 +710,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -783,10 +771,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -1005,10 +990,7 @@ encodeLoop: l := matched // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index d36be7bd8c2..cf8cad00dcf 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -13,7 +13,7 @@ const ( dFastLongLen = 8 // Bytes used for table hash dLongTableShardCnt = 1 << (dFastLongTableBits - dictShardBits) // Number of shards in the table - dLongTableShardSize = dFastLongTableSize / tableShardCnt // Size of an individual shard + dLongTableShardSize = dFastLongTableSize / dLongTableShardCnt // Size of an individual shard dFastShortTableBits = tableBits // Bits used in the short match table dFastShortTableSize = 1 << dFastShortTableBits // Size of the table @@ -149,10 +149,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -266,10 +263,7 @@ encodeLoop: l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- @@ -462,10 +456,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] { repIndex-- start-- @@ -576,10 +567,7 @@ encodeLoop: l := int32(matchLen(src[s+4:], src[t+4:])) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] { s-- t-- @@ -809,10 +797,7 @@ encodeLoop: // and have to do special offset treatment. 
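Besides the clamp cleanups, the enc_dfast.go constants above carry a genuine fix: dLongTableShardSize was computed from tableShardCnt, the short table's shard count, so the long table's per-shard size was wrong; it now divides by dLongTableShardCnt. The intended invariant, sketched with stand-in values for the bit widths:

package main

import "fmt"

const (
	dFastLongTableBits = 17 // stand-in; see enc_dfast.go for the real value
	dFastLongTableSize = 1 << dFastLongTableBits
	dictShardBits      = 7

	// Shard count and shard size must be derived from the same table:
	dLongTableShardCnt  = 1 << (dFastLongTableBits - dictShardBits)
	dLongTableShardSize = dFastLongTableSize / dLongTableShardCnt
)

func main() {
	// The shards now tile the long table exactly.
	fmt.Println(dLongTableShardCnt*dLongTableShardSize == dFastLongTableSize) // true
}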
startLimit := nextEmit + 1 - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for repIndex > tMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch-1 { repIndex-- start-- @@ -927,10 +912,7 @@ encodeLoop: l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- diff --git a/vendor/github.com/klauspost/compress/zstd/enc_fast.go b/vendor/github.com/klauspost/compress/zstd/enc_fast.go index f45a3da7dae..9180a3a5820 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_fast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_fast.go @@ -143,10 +143,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } + sMin := max(s-e.maxMatchOff, 0) for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { repIndex-- start-- @@ -223,10 +220,7 @@ encodeLoop: l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- @@ -387,10 +381,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } + sMin := max(s-e.maxMatchOff, 0) for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] { repIndex-- start-- @@ -469,10 +460,7 @@ encodeLoop: l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] { s-- t-- @@ -655,10 +643,7 @@ encodeLoop: // and have to do special offset treatment. startLimit := nextEmit + 1 - sMin := s - e.maxMatchOff - if sMin < 0 { - sMin = 0 - } + sMin := max(s-e.maxMatchOff, 0) for repIndex > sMin && start > startLimit && src[repIndex-1] == src[start-1] && seq.matchLen < maxMatchLength-zstdMinMatch { repIndex-- start-- @@ -735,10 +720,7 @@ encodeLoop: l := e.matchlen(s+4, t+4, src) + 4 // Extend backwards - tMin := s - e.maxMatchOff - if tMin < 0 { - tMin = 0 - } + tMin := max(s-e.maxMatchOff, 0) for t > tMin && s > nextEmit && src[t-1] == src[s-1] && l < maxMatchLength { s-- t-- diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index e47af66e7c9..d88f067e5c2 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -238,10 +238,7 @@ func (d *frameDec) reset(br byteBuffer) error { if d.WindowSize == 0 && d.SingleSegment { // We may not need window in this case. 
- d.WindowSize = d.FrameContentSize - if d.WindowSize < MinWindowSize { - d.WindowSize = MinWindowSize - } + d.WindowSize = max(d.FrameContentSize, MinWindowSize) if d.WindowSize > d.o.maxDecodedSize { if debugDecoder { printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize) diff --git a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go index ab26326a8ff..3a0f4e7fbe6 100644 --- a/vendor/github.com/klauspost/compress/zstd/fse_encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/fse_encoder.go @@ -149,7 +149,7 @@ func (s *fseEncoder) buildCTable() error { if v > largeLimit { s.zeroBits = true } - for nbOccurrences := int16(0); nbOccurrences < v; nbOccurrences++ { + for range v { tableSymbol[position] = symbol position = (position + step) & tableMask for position > highThreshold { diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go index 9a7de82f9ef..0bfb0e43c7b 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -231,10 +231,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error { llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state out := s.out - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } + maxBlockSize := min(s.windowSize, maxCompressedBlockSize) if debugDecoder { println("decodeSync: decoding", seqs, "sequences", br.remain(), "bits remain on stream") diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go index c59f17e07ad..1f8c3cec28c 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -79,10 +79,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { br := s.br - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } + maxBlockSize := min(s.windowSize, maxCompressedBlockSize) ctx := decodeSyncAsmContext{ llTable: s.litLengths.fse.dt[:maxTablesize], @@ -237,10 +234,7 @@ func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmC func (s *sequenceDecs) decode(seqs []seqVals) error { br := s.br - maxBlockSize := maxCompressedBlockSize - if s.windowSize < maxBlockSize { - maxBlockSize = s.windowSize - } + maxBlockSize := min(s.windowSize, maxCompressedBlockSize) ctx := decodeAsmContext{ llTable: s.litLengths.fse.dt[:maxTablesize], diff --git a/vendor/github.com/klauspost/compress/zstd/simple_go124.go b/vendor/github.com/klauspost/compress/zstd/simple_go124.go new file mode 100644 index 00000000000..2efc0497bf9 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/simple_go124.go @@ -0,0 +1,56 @@ +// Copyright 2025+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. + +//go:build go1.24 + +package zstd + +import ( + "errors" + "runtime" + "sync" + "weak" +) + +var weakMu sync.Mutex +var simpleEnc weak.Pointer[Encoder] +var simpleDec weak.Pointer[Decoder] + +// EncodeTo appends the encoded data from src to dst. 
+func EncodeTo(dst []byte, src []byte) []byte { + weakMu.Lock() + enc := simpleEnc.Value() + if enc == nil { + var err error + enc, err = NewWriter(nil, WithEncoderConcurrency(runtime.NumCPU()), WithWindowSize(1<<20), WithLowerEncoderMem(true), WithZeroFrames(true)) + if err != nil { + panic("failed to create simple encoder: " + err.Error()) + } + simpleEnc = weak.Make(enc) + } + weakMu.Unlock() + + return enc.EncodeAll(src, dst) +} + +// DecodeTo appends the decoded data from src to dst. +// The maximum decoded size is 1GiB, +// not including what may already be in dst. +func DecodeTo(dst []byte, src []byte) ([]byte, error) { + weakMu.Lock() + dec := simpleDec.Value() + if dec == nil { + var err error + dec, err = NewReader(nil, WithDecoderConcurrency(runtime.NumCPU()), WithDecoderLowmem(true), WithDecoderMaxMemory(1<<30)) + if err != nil { + weakMu.Unlock() + return nil, errors.New("failed to create simple decoder: " + err.Error()) + } + runtime.SetFinalizer(dec, func(d *Decoder) { + d.Close() + }) + simpleDec = weak.Make(dec) + } + weakMu.Unlock() + return dec.DecodeAll(src, dst) +} diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go index a17381b8f89..336c2889304 100644 --- a/vendor/github.com/klauspost/compress/zstd/snappy.go +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -257,7 +257,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { if !r.readFull(r.buf[:len(snappyMagicBody)], false) { return written, r.err } - for i := 0; i < len(snappyMagicBody); i++ { + for i := range len(snappyMagicBody) { if r.buf[i] != snappyMagicBody[i] { println("r.buf[i] != snappyMagicBody[i]", r.buf[i], snappyMagicBody[i], i) r.err = ErrSnappyCorrupt diff --git a/vendor/github.com/klauspost/compress/zstd/zip.go b/vendor/github.com/klauspost/compress/zstd/zip.go index 29c15c8c4ef..3198d718926 100644 --- a/vendor/github.com/klauspost/compress/zstd/zip.go +++ b/vendor/github.com/klauspost/compress/zstd/zip.go @@ -19,7 +19,7 @@ const ZipMethodWinZip = 93 const ZipMethodPKWare = 20 // zipReaderPool is the default reader pool. -var zipReaderPool = sync.Pool{New: func() interface{} { +var zipReaderPool = sync.Pool{New: func() any { z, err := NewReader(nil, WithDecoderLowmem(true), WithDecoderMaxWindow(128<<20), WithDecoderConcurrency(1)) if err != nil { panic(err) diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index 6252b46ae6f..1a869710d2c 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -98,13 +98,13 @@ var ( ErrDecoderNilInput = errors.New("nil input provided as reader") ) -func println(a ...interface{}) { +func println(a ...any) { if debug || debugDecoder || debugEncoder { log.Println(a...) } } -func printf(format string, a ...interface{}) { +func printf(format string, a ...any) { if debug || debugDecoder || debugEncoder { log.Printf(format, a...) 
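The new simple_go124.go shown above caches one process-wide Encoder and Decoder behind weak.Pointer (Go 1.24): the shared object stays reachable while callers hold it, yet the garbage collector may reclaim it when idle, and the next call transparently rebuilds it. A minimal sketch of that caching pattern, independent of zstd:

package main

import (
	"fmt"
	"sync"
	"weak"
)

type resource struct{ id int }

var (
	mu     sync.Mutex
	cached weak.Pointer[resource]
	built  int
)

// get returns the shared resource, rebuilding it if the GC reclaimed it.
func get() *resource {
	mu.Lock()
	defer mu.Unlock()
	if r := cached.Value(); r != nil {
		return r // still alive: reuse it
	}
	built++
	r := &resource{id: built}
	cached = weak.Make(r) // a weak pointer does not keep r alive by itself
	return r
}

func main() {
	a, b := get(), get()
	fmt.Println(a == b, built) // true 1 (second call reused the live value)
}

The zstd version additionally sets a finalizer on the Decoder so its background goroutines are closed once only the weak reference remains.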
} diff --git a/vendor/github.com/mistifyio/go-zfs/v3/.envrc b/vendor/github.com/mistifyio/go-zfs/v4/.envrc similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/v3/.envrc rename to vendor/github.com/mistifyio/go-zfs/v4/.envrc diff --git a/vendor/github.com/mistifyio/go-zfs/v3/.gitignore b/vendor/github.com/mistifyio/go-zfs/v4/.gitignore similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/v3/.gitignore rename to vendor/github.com/mistifyio/go-zfs/v4/.gitignore diff --git a/vendor/github.com/mistifyio/go-zfs/v3/.golangci.yml b/vendor/github.com/mistifyio/go-zfs/v4/.golangci.yml similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/v3/.golangci.yml rename to vendor/github.com/mistifyio/go-zfs/v4/.golangci.yml diff --git a/vendor/github.com/mistifyio/go-zfs/v3/.yamllint b/vendor/github.com/mistifyio/go-zfs/v4/.yamllint similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/v3/.yamllint rename to vendor/github.com/mistifyio/go-zfs/v4/.yamllint diff --git a/vendor/github.com/mistifyio/go-zfs/v3/CHANGELOG.md b/vendor/github.com/mistifyio/go-zfs/v4/CHANGELOG.md similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/v3/CHANGELOG.md rename to vendor/github.com/mistifyio/go-zfs/v4/CHANGELOG.md diff --git a/vendor/github.com/mistifyio/go-zfs/v3/CONTRIBUTING.md b/vendor/github.com/mistifyio/go-zfs/v4/CONTRIBUTING.md similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/v3/CONTRIBUTING.md rename to vendor/github.com/mistifyio/go-zfs/v4/CONTRIBUTING.md diff --git a/vendor/github.com/mistifyio/go-zfs/v3/LICENSE b/vendor/github.com/mistifyio/go-zfs/v4/LICENSE similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/v3/LICENSE rename to vendor/github.com/mistifyio/go-zfs/v4/LICENSE diff --git a/vendor/github.com/mistifyio/go-zfs/v3/Makefile b/vendor/github.com/mistifyio/go-zfs/v4/Makefile similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/v3/Makefile rename to vendor/github.com/mistifyio/go-zfs/v4/Makefile diff --git a/vendor/github.com/mistifyio/go-zfs/v3/README.md b/vendor/github.com/mistifyio/go-zfs/v4/README.md similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/v3/README.md rename to vendor/github.com/mistifyio/go-zfs/v4/README.md diff --git a/vendor/github.com/mistifyio/go-zfs/v3/Vagrantfile b/vendor/github.com/mistifyio/go-zfs/v4/Vagrantfile similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/v3/Vagrantfile rename to vendor/github.com/mistifyio/go-zfs/v4/Vagrantfile diff --git a/vendor/github.com/mistifyio/go-zfs/v3/error.go b/vendor/github.com/mistifyio/go-zfs/v4/error.go similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/v3/error.go rename to vendor/github.com/mistifyio/go-zfs/v4/error.go diff --git a/vendor/github.com/mistifyio/go-zfs/v3/lint.mk b/vendor/github.com/mistifyio/go-zfs/v4/lint.mk similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/v3/lint.mk rename to vendor/github.com/mistifyio/go-zfs/v4/lint.mk diff --git a/vendor/github.com/mistifyio/go-zfs/v3/rules.mk b/vendor/github.com/mistifyio/go-zfs/v4/rules.mk similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/v3/rules.mk rename to vendor/github.com/mistifyio/go-zfs/v4/rules.mk diff --git a/vendor/github.com/mistifyio/go-zfs/v3/shell.nix b/vendor/github.com/mistifyio/go-zfs/v4/shell.nix similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/v3/shell.nix rename to vendor/github.com/mistifyio/go-zfs/v4/shell.nix diff --git 
a/vendor/github.com/mistifyio/go-zfs/v3/utils.go b/vendor/github.com/mistifyio/go-zfs/v4/utils.go similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/v3/utils.go rename to vendor/github.com/mistifyio/go-zfs/v4/utils.go diff --git a/vendor/github.com/mistifyio/go-zfs/v3/utils_notsolaris.go b/vendor/github.com/mistifyio/go-zfs/v4/utils_notsolaris.go similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/v3/utils_notsolaris.go rename to vendor/github.com/mistifyio/go-zfs/v4/utils_notsolaris.go diff --git a/vendor/github.com/mistifyio/go-zfs/v3/utils_solaris.go b/vendor/github.com/mistifyio/go-zfs/v4/utils_solaris.go similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/v3/utils_solaris.go rename to vendor/github.com/mistifyio/go-zfs/v4/utils_solaris.go diff --git a/vendor/github.com/mistifyio/go-zfs/v3/zfs.go b/vendor/github.com/mistifyio/go-zfs/v4/zfs.go similarity index 99% rename from vendor/github.com/mistifyio/go-zfs/v3/zfs.go rename to vendor/github.com/mistifyio/go-zfs/v4/zfs.go index 1166bdc212f..dbcbc8ae307 100644 --- a/vendor/github.com/mistifyio/go-zfs/v3/zfs.go +++ b/vendor/github.com/mistifyio/go-zfs/v4/zfs.go @@ -312,7 +312,7 @@ func (d *Dataset) SetProperty(key, val string) error { // A full list of available ZFS properties may be found in the ZFS manual: // https://openzfs.github.io/openzfs-docs/man/7/zfsprops.7.html. func (d *Dataset) GetProperty(key string) (string, error) { - out, err := zfsOutput("get", "-H", key, d.Name) + out, err := zfsOutput("get", "-Hp", key, d.Name) if err != nil { return "", err } diff --git a/vendor/github.com/mistifyio/go-zfs/v3/zpool.go b/vendor/github.com/mistifyio/go-zfs/v4/zpool.go similarity index 100% rename from vendor/github.com/mistifyio/go-zfs/v3/zpool.go rename to vendor/github.com/mistifyio/go-zfs/v4/zpool.go diff --git a/vendor/github.com/pkg/sftp/SECURITY.md b/vendor/github.com/pkg/sftp/SECURITY.md new file mode 100644 index 00000000000..9ebc2631ff5 --- /dev/null +++ b/vendor/github.com/pkg/sftp/SECURITY.md @@ -0,0 +1,13 @@ +# Security Policy + +## Supported Versions + +Security updates are provided for the latest released version of this package. +We also welcome vulnerability reports for the development version to help us ensure it is secure before the next release. + +## Reporting a Vulnerability + +If you believe you’ve found a security vulnerability in this project, we strongly encourage you to report it privately using GitHub’s [security advisory system](https://github.com/pkg/sftp/security/advisories/new). +This will allow us to review and address the issue before public disclosure. + +Thank you for helping us keep the project secure. diff --git a/vendor/github.com/pkg/sftp/client.go b/vendor/github.com/pkg/sftp/client.go index 0d4d9a91835..307a35ea5b6 100644 --- a/vendor/github.com/pkg/sftp/client.go +++ b/vendor/github.com/pkg/sftp/client.go @@ -158,6 +158,17 @@ func UseFstat(value bool) ClientOption { } } +// CopyStderrTo specifies a writer to which the standard error of the remote sftp-server command should be written. +// +// The writer passed in will not be automatically closed. +// It is the responsibility of the caller to coordinate closure of any writers. +func CopyStderrTo(wr io.Writer) ClientOption { + return func(c *Client) error { + c.stderrTo = wr + return nil + } +} + // Client represents an SFTP session on a *ssh.ClientConn SSH connection. 
// Multiple Clients can be active on a single SSH connection, and a Client // may be called concurrently from multiple Goroutines. @@ -166,6 +177,8 @@ func UseFstat(value bool) ClientOption { type Client struct { clientConn + stderrTo io.Writer + ext map[string]string // Extensions (name -> data). maxPacket int // max packet size read or written. @@ -186,9 +199,7 @@ func NewClient(conn *ssh.Client, opts ...ClientOption) (*Client, error) { if err != nil { return nil, err } - if err := s.RequestSubsystem("sftp"); err != nil { - return nil, err - } + pw, err := s.StdinPipe() if err != nil { return nil, err @@ -197,15 +208,27 @@ func NewClient(conn *ssh.Client, opts ...ClientOption) (*Client, error) { if err != nil { return nil, err } + perr, err := s.StderrPipe() + if err != nil { + return nil, err + } - return NewClientPipe(pr, pw, opts...) + if err := s.RequestSubsystem("sftp"); err != nil { + return nil, err + } + + return newClientPipe(pr, perr, pw, s.Wait, opts...) } // NewClientPipe creates a new SFTP client given a Reader and a WriteCloser. // This can be used for connecting to an SFTP server over TCP/TLS or by using // the system's ssh client program (e.g. via exec.Command). func NewClientPipe(rd io.Reader, wr io.WriteCloser, opts ...ClientOption) (*Client, error) { - sftp := &Client{ + return newClientPipe(rd, nil, wr, nil, opts...) +} + +func newClientPipe(rd, stderr io.Reader, wr io.WriteCloser, wait func() error, opts ...ClientOption) (*Client, error) { + c := &Client{ clientConn: clientConn{ conn: conn{ Reader: rd, @@ -213,6 +236,7 @@ func NewClientPipe(rd io.Reader, wr io.WriteCloser, opts ...ClientOption) (*Clie }, inflight: make(map[uint32]chan<- result), closed: make(chan struct{}), + wait: wait, }, ext: make(map[string]string), @@ -222,32 +246,50 @@ func NewClientPipe(rd io.Reader, wr io.WriteCloser, opts ...ClientOption) (*Clie } for _, opt := range opts { - if err := opt(sftp); err != nil { + if err := opt(c); err != nil { wr.Close() return nil, err } } - if err := sftp.sendInit(); err != nil { + if stderr != nil { + wr := io.Discard + if c.stderrTo != nil { + wr = c.stderrTo + } + + go func() { + // DO NOT close the writer! + // Programs may pass in `os.Stderr` to write the remote stderr to, + // and the program may continue after disconnect by reconnecting. + // But if we've closed their stderr, then we just messed everything up. + + if _, err := io.Copy(wr, stderr); err != nil { + debug("error copying stderr: %v", err) + } + }() + } + + if err := c.sendInit(); err != nil { wr.Close() return nil, fmt.Errorf("error sending init packet to server: %w", err) } - if err := sftp.recvVersion(); err != nil { + if err := c.recvVersion(); err != nil { wr.Close() return nil, fmt.Errorf("error receiving version packet from server: %w", err) } - sftp.clientConn.wg.Add(1) + c.clientConn.wg.Add(1) go func() { - defer sftp.clientConn.wg.Done() + defer c.clientConn.wg.Done() - if err := sftp.clientConn.recv(); err != nil { - sftp.clientConn.broadcastErr(err) + if err := c.clientConn.recv(); err != nil { + c.clientConn.broadcastErr(err) } }() - return sftp, nil + return c, nil } // Create creates the named file mode 0666 (before umask), truncating it if it diff --git a/vendor/github.com/pkg/sftp/conn.go b/vendor/github.com/pkg/sftp/conn.go index 93bc37bf715..e68a2bd06ff 100644 --- a/vendor/github.com/pkg/sftp/conn.go +++ b/vendor/github.com/pkg/sftp/conn.go @@ -22,7 +22,7 @@ type conn struct { // For the client mode just pass 0. 
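The client.go changes above open the session's stderr pipe before requesting the subsystem and drain it in a goroutine, so the new CopyStderrTo option can surface sftp-server diagnostics that were previously discarded. A hypothetical use; sshConn stands for an already-connected *ssh.Client:

package main

import (
	"log"
	"os"

	"github.com/pkg/sftp"
	"golang.org/x/crypto/ssh"
)

// newLoggingClient mirrors the remote sftp-server's stderr onto ours.
// CopyStderrTo never closes the writer, so passing os.Stderr is safe.
func newLoggingClient(sshConn *ssh.Client) (*sftp.Client, error) {
	return sftp.NewClient(sshConn, sftp.CopyStderrTo(os.Stderr))
}

func main() {
	log.Println("sketch only: dialing an SSH server is out of scope here")
}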
// It returns io.EOF if the connection is closed and // there are no more packets to read. -func (c *conn) recvPacket(orderID uint32) (uint8, []byte, error) { +func (c *conn) recvPacket(orderID uint32) (fxp, []byte, error) { return recvPacket(c, c.alloc, orderID) } @@ -43,6 +43,8 @@ type clientConn struct { conn wg sync.WaitGroup + wait func() error // if non-nil, call this during Wait() to get a possible remote status error. + sync.Mutex // protects inflight inflight map[uint32]chan<- result // outstanding requests @@ -55,6 +57,27 @@ type clientConn struct { // goroutines. func (c *clientConn) Wait() error { <-c.closed + + if c.wait == nil { + // Only return this error if c.wait won't return something more useful. + return c.err + } + + if err := c.wait(); err != nil { + + // TODO: when https://github.com/golang/go/issues/35025 is fixed, + // we can remove this if block entirely. + // Right now, it’s always going to return this, so it is not useful. + // But we have this code here so that as soon as the ssh library is updated, + // we can return a possibly more useful error. + if err.Error() == "ssh: session not started" { + return c.err + } + + return err + } + + // c.wait returned no error; so, let's return something maybe more useful. return c.err } @@ -119,7 +142,7 @@ func (c *clientConn) getChannel(sid uint32) (chan<- result, bool) { // result captures the result of receiving the a packet from the server type result struct { - typ byte + typ fxp data []byte err error } @@ -129,7 +152,7 @@ type idmarshaler interface { encoding.BinaryMarshaler } -func (c *clientConn) sendPacket(ctx context.Context, ch chan result, p idmarshaler) (byte, []byte, error) { +func (c *clientConn) sendPacket(ctx context.Context, ch chan result, p idmarshaler) (fxp, []byte, error) { if cap(ch) < 1 { ch = make(chan result, 1) } diff --git a/vendor/github.com/pkg/sftp/packet.go b/vendor/github.com/pkg/sftp/packet.go index bfe6a3c9770..3836ab6f20c 100644 --- a/vendor/github.com/pkg/sftp/packet.go +++ b/vendor/github.com/pkg/sftp/packet.go @@ -304,16 +304,22 @@ func sendPacket(w io.Writer, m encoding.BinaryMarshaler) error { return nil } -func recvPacket(r io.Reader, alloc *allocator, orderID uint32) (uint8, []byte, error) { +func recvPacket(r io.Reader, alloc *allocator, orderID uint32) (fxp, []byte, error) { var b []byte if alloc != nil { b = alloc.GetPage(orderID) } else { b = make([]byte, 4) } - if _, err := io.ReadFull(r, b[:4]); err != nil { - return 0, nil, err + + if n, err := io.ReadFull(r, b[:4]); err != nil { + if err == io.EOF { + return 0, nil, err + } + + return 0, nil, fmt.Errorf("error reading packet length: %d of 4: %w", n, err) } + length, _ := unmarshalUint32(b) if length > maxMsgLength { debug("recv packet %d bytes too long", length) @@ -323,24 +329,39 @@ func recvPacket(r io.Reader, alloc *allocator, orderID uint32) (uint8, []byte, e debug("recv packet of 0 bytes too short") return 0, nil, errShortPacket } + if alloc == nil { b = make([]byte, length) } - if _, err := io.ReadFull(r, b[:length]); err != nil { + + n, err := io.ReadFull(r, b[:length]) + b = b[:n] + + if err != nil { + debug("recv packet error: %d of %d bytes: %x", n, length, b) + // ReadFull only returns EOF if it has read no bytes. // In this case, that means a partial packet, and thus unexpected. 
if err == io.EOF { err = io.ErrUnexpectedEOF } - debug("recv packet %d bytes: err %v", length, err) - return 0, nil, err + + if n == 0 { + return 0, nil, fmt.Errorf("error reading packet body: %d of %d: %w", n, length, err) + } + + return 0, nil, fmt.Errorf("error reading packet body: %d of %d: (%s) %w", n, length, fxp(b[0]), err) } + + typ, payload := fxp(b[0]), b[1:n] + if debugDumpRxPacketBytes { - debug("recv packet: %s %d bytes %x", fxp(b[0]), length, b[1:length]) + debug("recv packet: %s %d bytes %x", typ, length, payload) } else if debugDumpRxPacket { - debug("recv packet: %s %d bytes", fxp(b[0]), length) + debug("recv packet: %s %d bytes", typ, length) } - return b[0], b[1:length], nil + + return typ, payload, nil } type extensionPair struct { diff --git a/vendor/github.com/pkg/sftp/request-example.go b/vendor/github.com/pkg/sftp/request-example.go index 519b3b76806..f003e7114c0 100644 --- a/vendor/github.com/pkg/sftp/request-example.go +++ b/vendor/github.com/pkg/sftp/request-example.go @@ -20,7 +20,7 @@ const maxSymlinkFollows = 5 var errTooManySymlinks = errors.New("too many symbolic links") -// InMemHandler returns a Hanlders object with the test handlers. +// InMemHandler returns a Handlers object with the test handlers. func InMemHandler() Handlers { root := &root{ rootFile: &memFile{name: "/", modtime: time.Now(), isdir: true}, @@ -449,7 +449,7 @@ func (fs *root) Lstat(r *Request) (ListerAt, error) { return listerat{file}, nil } -// In memory file-system-y thing that the Hanlders live on +// In memory file-system-y thing that the Handlers live on type root struct { rootFile *memFile mockErr error diff --git a/vendor/github.com/pkg/sftp/request-server.go b/vendor/github.com/pkg/sftp/request-server.go index 11047e6b10a..08c24d7f6fa 100644 --- a/vendor/github.com/pkg/sftp/request-server.go +++ b/vendor/github.com/pkg/sftp/request-server.go @@ -148,7 +148,7 @@ func (rs *RequestServer) serveLoop(pktChan chan<- orderedRequest) error { var err error var pkt requestPacket - var pktType uint8 + var pktType fxp var pktBytes []byte for { @@ -158,7 +158,7 @@ func (rs *RequestServer) serveLoop(pktChan chan<- orderedRequest) error { return err } - pkt, err = makePacket(rxPacket{fxp(pktType), pktBytes}) + pkt, err = makePacket(rxPacket{pktType, pktBytes}) if err != nil { switch { case errors.Is(err, errUnknownExtendedPacket): diff --git a/vendor/github.com/pkg/sftp/server.go b/vendor/github.com/pkg/sftp/server.go index cd656d8fbf0..7735c42c4e5 100644 --- a/vendor/github.com/pkg/sftp/server.go +++ b/vendor/github.com/pkg/sftp/server.go @@ -390,7 +390,7 @@ func (svr *Server) Serve() error { var err error var pkt requestPacket - var pktType uint8 + var pktType fxp var pktBytes []byte for { pktType, pktBytes, err = svr.serverConn.recvPacket(svr.pktMgr.getNextOrderID()) @@ -403,7 +403,7 @@ func (svr *Server) Serve() error { break } - pkt, err = makePacket(rxPacket{fxp(pktType), pktBytes}) + pkt, err = makePacket(rxPacket{pktType, pktBytes}) if err != nil { switch { case errors.Is(err, errUnknownExtendedPacket): diff --git a/vendor/github.com/pkg/sftp/sftp.go b/vendor/github.com/pkg/sftp/sftp.go index 778c8f3d731..1e698bb2782 100644 --- a/vendor/github.com/pkg/sftp/sftp.go +++ b/vendor/github.com/pkg/sftp/sftp.go @@ -184,15 +184,15 @@ func (f fx) String() string { } type unexpectedPacketErr struct { - want, got uint8 + want, got fxp } func (u *unexpectedPacketErr) Error() string { - return fmt.Sprintf("sftp: unexpected packet: want %v, got %v", fxp(u.want), fxp(u.got)) + return 
fmt.Sprintf("sftp: unexpected packet: want %v, got %v", u.want, u.got) } -func unimplementedPacketErr(u uint8) error { - return fmt.Errorf("sftp: unimplemented packet type: got %v", fxp(u)) +func unimplementedPacketErr(u fxp) error { + return fmt.Errorf("sftp: unimplemented packet type: got %v", u) } type unexpectedIDErr struct{ want, got uint32 } diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/common.go b/vendor/github.com/vbatts/tar-split/archive/tar/common.go index dee9e47e4ae..e687a08c966 100644 --- a/vendor/github.com/vbatts/tar-split/archive/tar/common.go +++ b/vendor/github.com/vbatts/tar-split/archive/tar/common.go @@ -34,6 +34,7 @@ var ( errMissData = errors.New("archive/tar: sparse file references non-existent data") errUnrefData = errors.New("archive/tar: sparse file contains unreferenced data") errWriteHole = errors.New("archive/tar: write non-NUL byte in sparse hole") + errSparseTooLong = errors.New("archive/tar: sparse map too long") ) type headerError []string diff --git a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go index 248a7ccb15a..a645c41605f 100644 --- a/vendor/github.com/vbatts/tar-split/archive/tar/reader.go +++ b/vendor/github.com/vbatts/tar-split/archive/tar/reader.go @@ -581,12 +581,17 @@ func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) { cntNewline int64 buf bytes.Buffer blk block + totalSize int ) // feedTokens copies data in blocks from r into buf until there are // at least cnt newlines in buf. It will not read more blocks than needed. feedTokens := func(n int64) error { for cntNewline < n { + totalSize += len(blk) + if totalSize > maxSpecialFileSize { + return errSparseTooLong + } if _, err := mustReadFull(r, blk[:]); err != nil { return err } @@ -619,8 +624,8 @@ func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) { } // Parse for all member entries. - // numEntries is trusted after this since a potential attacker must have - // committed resources proportional to what this library used. + // numEntries is trusted after this since feedTokens limits the number of + // tokens based on maxSpecialFileSize. if err := feedTokens(2 * numEntries); err != nil { return nil, err } diff --git a/vendor/go.podman.io/common/libimage/platform.go b/vendor/go.podman.io/common/libimage/platform.go index 6be272c43f5..20c0f3c1d4e 100644 --- a/vendor/go.podman.io/common/libimage/platform.go +++ b/vendor/go.podman.io/common/libimage/platform.go @@ -8,20 +8,24 @@ import ( ) // PlatformPolicy controls the behavior of image-platform matching. +// // Deprecated: new code should use define.PlatformPolicy directly. type PlatformPolicy = define.PlatformPolicy const ( // Only debug log if an image does not match the expected platform. + // // Deprecated: new code should reference define.PlatformPolicyDefault directly. PlatformPolicyDefault = define.PlatformPolicyDefault // Warn if an image does not match the expected platform. + // // Deprecated: new code should reference define.PlatformPolicyWarn directly. PlatformPolicyWarn = define.PlatformPolicyWarn ) // NormalizePlatform normalizes (according to the OCI spec) the specified os, // arch and variant. If left empty, the individual item will be normalized. +// // Deprecated: new code should call libimage/platform.Normalize() instead. 
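This and several go.podman.io hunks below only insert a blank comment line before each Deprecated: note. That is not cosmetic: the Go doc-comment convention expects Deprecated: to begin its own paragraph, which is what tools like pkg.go.dev and staticcheck (SA1019) key on. The convention on a throwaway function:

package main

// OldAdd adds two ints.
//
// Deprecated: use Add instead. The blank "//" line above starts a new
// paragraph, which is what marks this identifier as deprecated for tooling.
func OldAdd(a, b int) int { return a + b }

// Add adds two ints.
func Add(a, b int) int { return a + b }

func main() { _ = Add(1, 2) }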
func NormalizePlatform(rawOS, rawArch, rawVariant string) (os, arch, variant string) { return platform.Normalize(rawOS, rawArch, rawVariant) diff --git a/vendor/go.podman.io/common/libnetwork/types/network.go b/vendor/go.podman.io/common/libnetwork/types/network.go index df6a5ee4468..5b15d080372 100644 --- a/vendor/go.podman.io/common/libnetwork/types/network.go +++ b/vendor/go.podman.io/common/libnetwork/types/network.go @@ -322,6 +322,7 @@ type PortMapping struct { } // OCICNIPortMapping maps to the standard CNI portmapping Capability. +// // Deprecated: Do not use this struct for new fields. This only exists // for backwards compatibility. type OCICNIPortMapping struct { diff --git a/vendor/go.podman.io/common/pkg/config/config.go b/vendor/go.podman.io/common/pkg/config/config.go index 8bc23deba1b..ae0587f7b73 100644 --- a/vendor/go.podman.io/common/pkg/config/config.go +++ b/vendor/go.podman.io/common/pkg/config/config.go @@ -248,6 +248,7 @@ type ContainersConfig struct { UserNS string `toml:"userns,omitempty"` // UserNSSize how many UIDs to allocate for automatically created UserNS + // // Deprecated: no user of this field is known. UserNSSize int `toml:"userns_size,omitempty,omitzero"` } @@ -558,6 +559,7 @@ type EngineConfig struct { // PodmanshTimeout is the number of seconds to wait for podmansh logins. // In other words, the timeout for the `podmansh` container to be in running // state. + // // Deprecated: Use podmansh.Timeout instead. podmansh.Timeout has precedence. PodmanshTimeout uint `toml:"podmansh_timeout,omitempty,omitzero"` } diff --git a/vendor/go.podman.io/common/pkg/config/containers.conf b/vendor/go.podman.io/common/pkg/config/containers.conf index 2e392d048e1..fd337831c9d 100644 --- a/vendor/go.podman.io/common/pkg/config/containers.conf +++ b/vendor/go.podman.io/common/pkg/config/containers.conf @@ -909,7 +909,7 @@ default_sysctls = [ # "https://example.com/linux/amd64/foobar.ami" on a Linux AMD machine. # If unspecified, the default Podman machine image will be used. # -#image = "" +#image = "docker://quay.io/podman/machine-os" # Memory in MB a machine is created with. # diff --git a/vendor/go.podman.io/common/pkg/config/default.go b/vendor/go.podman.io/common/pkg/config/default.go index 3bf0bc16929..54402d1712e 100644 --- a/vendor/go.podman.io/common/pkg/config/default.go +++ b/vendor/go.podman.io/common/pkg/config/default.go @@ -192,6 +192,7 @@ const ( // DefaultShmSize is the default upper limit on the size of tmpfs mounts. DefaultShmSize = "65536k" // DefaultUserNSSize indicates the default number of UIDs allocated for user namespace within a container. + // // Deprecated: no user of this field is known. DefaultUserNSSize = 65536 // OCIBufSize limits maximum LogSizeMax. @@ -305,14 +306,11 @@ func defaultMachineConfig() MachineConfig { return MachineConfig{ CPUs: uint64(cpus), DiskSize: 100, - // TODO: Set machine image default here - // Currently the default is set in Podman as we need time to stabilize - // VM images and locations between different providers. 
- Image: "", - Memory: 2048, - User: getDefaultMachineUser(), - Volumes: attributedstring.NewSlice(getDefaultMachineVolumes()), - Rosetta: true, + Image: "docker://quay.io/podman/machine-os", + Memory: 2048, + User: getDefaultMachineUser(), + Volumes: attributedstring.NewSlice(getDefaultMachineVolumes()), + Rosetta: true, } } diff --git a/vendor/go.podman.io/common/pkg/seccomp/default_linux.go b/vendor/go.podman.io/common/pkg/seccomp/default_linux.go index 30137656673..33d881472c6 100644 --- a/vendor/go.podman.io/common/pkg/seccomp/default_linux.go +++ b/vendor/go.podman.io/common/pkg/seccomp/default_linux.go @@ -616,6 +616,7 @@ func DefaultProfile() *Seccomp { Names: []string{ "bpf", "lookup_dcookie", + "perf_event_open", "quotactl", "quotactl_fd", "setdomainname", @@ -631,7 +632,6 @@ func DefaultProfile() *Seccomp { { Names: []string{ "lookup_dcookie", - "perf_event_open", "quotactl", "quotactl_fd", "setdomainname", @@ -927,7 +927,7 @@ func DefaultProfile() *Seccomp { ErrnoRet: &eperm, Args: []*Arg{}, Excludes: Filter{ - Caps: []string{"CAP_SYS_ADMIN", "CAP_BPF"}, + Caps: []string{"CAP_SYS_ADMIN", "CAP_PERFMON"}, }, }, { diff --git a/vendor/go.podman.io/common/pkg/seccomp/seccomp.json b/vendor/go.podman.io/common/pkg/seccomp/seccomp.json index 92d882b5ccf..5d02eb50cd0 100644 --- a/vendor/go.podman.io/common/pkg/seccomp/seccomp.json +++ b/vendor/go.podman.io/common/pkg/seccomp/seccomp.json @@ -693,6 +693,7 @@ "names": [ "bpf", "lookup_dcookie", + "perf_event_open", "quotactl", "quotactl_fd", "setdomainname", @@ -712,7 +713,6 @@ { "names": [ "lookup_dcookie", - "perf_event_open", "quotactl", "quotactl_fd", "setdomainname", @@ -1105,7 +1105,7 @@ "excludes": { "caps": [ "CAP_SYS_ADMIN", - "CAP_BPF" + "CAP_PERFMON" ] }, "errnoRet": 1, diff --git a/vendor/go.podman.io/common/version/version.go b/vendor/go.podman.io/common/version/version.go index ad62f22ac32..4a2be1fe1e2 100644 --- a/vendor/go.podman.io/common/version/version.go +++ b/vendor/go.podman.io/common/version/version.go @@ -1,4 +1,4 @@ package version // Version is the version of the build. -const Version = "0.66.0" +const Version = "0.67.0-dev" diff --git a/vendor/go.podman.io/image/v5/docker/docker_client.go b/vendor/go.podman.io/image/v5/docker/docker_client.go index 1c0d67105ee..467879c2366 100644 --- a/vendor/go.podman.io/image/v5/docker/docker_client.go +++ b/vendor/go.podman.io/image/v5/docker/docker_client.go @@ -35,6 +35,7 @@ import ( "go.podman.io/image/v5/types" "go.podman.io/storage/pkg/fileutils" "go.podman.io/storage/pkg/homedir" + "golang.org/x/sync/semaphore" ) const ( @@ -86,8 +87,19 @@ type extensionSignatureList struct { Signatures []extensionSignature `json:"signatures"` } -// bearerToken records a cached token we can use to authenticate. +// bearerToken records a cached token we can use to authenticate, or a pending process to obtain one. +// +// The goroutine obtaining the token holds lock to block concurrent token requests, and fills the structure (err and possibly the other fields) +// before releasing the lock. +// Other goroutines obtain lock to block on the token request, if any; and then inspect err to see if the token is usable. +// If it is not, they try to get a new one. type bearerToken struct { + // lock is held while obtaining the token. Potentially nested inside dockerClient.tokenCacheLock. + // This is a counting semaphore only because we need a cancellable lock operation. + lock *semaphore.Weighted + + // The following fields can only be accessed with lock held. 
+ err error // nil if the token was successfully obtained (but may be expired); an error if the next lock holder _must_ obtain a new token. token string expirationTime time.Time } @@ -116,8 +128,9 @@ type dockerClient struct { challenges []challenge supportsSignatures bool - // Private state for setupRequestAuth (key: string, value: bearerToken) - tokenCache sync.Map + // Private state for setupRequestAuth + tokenCacheLock sync.Mutex // Protects tokenCache. + tokenCache map[string]*bearerToken // Private state for detectProperties: detectPropertiesOnce sync.Once // detectPropertiesOnce is used to execute detectProperties() at most once. detectPropertiesError error // detectPropertiesError caches the initial error. @@ -274,6 +287,7 @@ func newDockerClient(sys *types.SystemContext, registry, reference string) (*doc registry: registry, userAgent: userAgent, tlsClientConfig: tlsClientConfig, + tokenCache: map[string]*bearerToken{}, reportedWarnings: set.New[string](), }, nil } @@ -716,50 +730,11 @@ func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope *authScope req.SetBasicAuth(c.auth.Username, c.auth.Password) return nil case "bearer": - registryToken := c.registryToken - if registryToken == "" { - cacheKey := "" - scopes := []authScope{c.scope} - if extraScope != nil { - // Using ':' as a separator here is unambiguous because getBearerToken below - // uses the same separator when formatting a remote request (and because - // repository names that we create can't contain colons, and extraScope values - // coming from a server come from `parseAuthScope`, which also splits on colons). - cacheKey = fmt.Sprintf("%s:%s:%s", extraScope.resourceType, extraScope.remoteName, extraScope.actions) - if colonCount := strings.Count(cacheKey, ":"); colonCount != 2 { - return fmt.Errorf( - "Internal error: there must be exactly 2 colons in the cacheKey ('%s') but got %d", - cacheKey, - colonCount, - ) - } - scopes = append(scopes, *extraScope) - } - var token bearerToken - t, inCache := c.tokenCache.Load(cacheKey) - if inCache { - token = t.(bearerToken) - } - if !inCache || time.Now().After(token.expirationTime) { - var ( - t *bearerToken - err error - ) - if c.auth.IdentityToken != "" { - t, err = c.getBearerTokenOAuth2(req.Context(), challenge, scopes) - } else { - t, err = c.getBearerToken(req.Context(), challenge, scopes) - } - if err != nil { - return err - } - - token = *t - c.tokenCache.Store(cacheKey, token) - } - registryToken = token.token + token, err := c.obtainBearerToken(req.Context(), challenge, extraScope) + if err != nil { + return err } - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", registryToken)) + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) return nil default: logrus.Debugf("no handler for %s authentication", challenge.Scheme) @@ -769,16 +744,94 @@ func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope *authScope return nil } -func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge challenge, - scopes []authScope) (*bearerToken, error) { +// obtainBearerToken gets an "Authorization: Bearer" token if one is available, or obtains a fresh one. 
+func (c *dockerClient) obtainBearerToken(ctx context.Context, challenge challenge, extraScope *authScope) (string, error) { + if c.registryToken != "" { + return c.registryToken, nil + } + + cacheKey := "" + scopes := []authScope{c.scope} + if extraScope != nil { + // Using ':' as a separator here is unambiguous because getBearerToken below + // uses the same separator when formatting a remote request (and because + // repository names that we create can't contain colons, and extraScope values + // coming from a server come from `parseAuthScope`, which also splits on colons). + cacheKey = fmt.Sprintf("%s:%s:%s", extraScope.resourceType, extraScope.remoteName, extraScope.actions) + if colonCount := strings.Count(cacheKey, ":"); colonCount != 2 { + return "", fmt.Errorf( + "Internal error: there must be exactly 2 colons in the cacheKey ('%s') but got %d", + cacheKey, + colonCount, + ) + } + scopes = append(scopes, *extraScope) + } + + token, newEntry, err := func() (*bearerToken, bool, error) { // A scope for defer + c.tokenCacheLock.Lock() + defer c.tokenCacheLock.Unlock() + token, ok := c.tokenCache[cacheKey] + if ok { + return token, false, nil + } else { + token = &bearerToken{ + lock: semaphore.NewWeighted(1), + } + // If this is a new *bearerToken, lock the entry before adding it to the cache, so that any other goroutine that finds + // this entry blocks until we obtain the token for the first time, and does not see an empty object + // (and does not try to obtain the token itself when we are going to do so). + if err := token.lock.Acquire(ctx, 1); err != nil { + // We do not block on this Acquire, so we don’t really expect to fail here — but if ctx is canceled, + // there is no point in trying to continue anyway. + return nil, false, err + } + c.tokenCache[cacheKey] = token + return token, true, nil + } + }() + if err != nil { + return "", err + } + if !newEntry { + // If this is an existing *bearerToken, obtain the lock only after releasing c.tokenCacheLock, + // so that users of other cacheKey values are not blocked for the whole duration of our HTTP roundtrip. + if err := token.lock.Acquire(ctx, 1); err != nil { + return "", err + } + } + + defer token.lock.Release(1) + + if !newEntry && token.err == nil && !time.Now().After(token.expirationTime) { + return token.token, nil // We have a usable token already. + } + + if c.auth.IdentityToken != "" { + err = c.getBearerTokenOAuth2(ctx, token, challenge, scopes) + } else { + err = c.getBearerToken(ctx, token, challenge, scopes) + } + token.err = err + if token.err != nil { + return "", token.err + } + return token.token, nil +} + +// getBearerTokenOAuth2 obtains an "Authorization: Bearer" token using a pre-existing identity token per +// https://github.com/distribution/distribution/blob/main/docs/spec/auth/oauth.md for challenge and scopes, +// and writes it into dest. 
+func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, dest *bearerToken, challenge challenge, + scopes []authScope) error { realm, ok := challenge.Parameters["realm"] if !ok { - return nil, errors.New("missing realm in bearer auth challenge") + return errors.New("missing realm in bearer auth challenge") } authReq, err := http.NewRequestWithContext(ctx, http.MethodPost, realm, nil) if err != nil { - return nil, err + return err } // Make the form data required against the oauth2 authentication @@ -803,26 +856,29 @@ func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge chall logrus.Debugf("%s %s", authReq.Method, authReq.URL.Redacted()) res, err := c.client.Do(authReq) if err != nil { - return nil, err + return err } defer res.Body.Close() if err := httpResponseToError(res, "Trying to obtain access token"); err != nil { - return nil, err + return err } - return newBearerTokenFromHTTPResponseBody(res) + return dest.readFromHTTPResponseBody(res) } -func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, - scopes []authScope) (*bearerToken, error) { +// getBearerToken obtains an "Authorization: Bearer" token using a GET request, per +// https://github.com/distribution/distribution/blob/main/docs/spec/auth/token.md for challenge and scopes, +// and writes it into dest. +func (c *dockerClient) getBearerToken(ctx context.Context, dest *bearerToken, challenge challenge, + scopes []authScope) error { realm, ok := challenge.Parameters["realm"] if !ok { - return nil, errors.New("missing realm in bearer auth challenge") + return errors.New("missing realm in bearer auth challenge") } authReq, err := http.NewRequestWithContext(ctx, http.MethodGet, realm, nil) if err != nil { - return nil, err + return err } params := authReq.URL.Query() @@ -850,22 +906,22 @@ func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, logrus.Debugf("%s %s", authReq.Method, authReq.URL.Redacted()) res, err := c.client.Do(authReq) if err != nil { - return nil, err + return err } defer res.Body.Close() if err := httpResponseToError(res, "Requesting bearer token"); err != nil { - return nil, err + return err } - return newBearerTokenFromHTTPResponseBody(res) + return dest.readFromHTTPResponseBody(res) } -// newBearerTokenFromHTTPResponseBody parses a http.Response to obtain a bearerToken. +// readFromHTTPResponseBody sets token data by parsing a http.Response. // The caller is still responsible for ensuring res.Body is closed. 
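The docker_client.go rewrite above turns the bearer-token cache from a sync.Map of finished tokens into a map of per-scope entries, each guarded by a weighted semaphore of capacity one. The first goroutine needing a scope's token inserts a locked entry and fills it; later goroutines block on that entry alone rather than on the whole cache, and Acquire respects context cancellation, which a plain sync.Mutex cannot. A minimal sketch of that cancellable-lock building block, assuming only golang.org/x/sync/semaphore:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/semaphore"
)

func main() {
	lock := semaphore.NewWeighted(1) // capacity 1: a mutex whose Acquire honors ctx
	ctx := context.Background()
	if err := lock.Acquire(ctx, 1); err != nil {
		fmt.Println("canceled before the lock was obtained:", err)
		return
	}
	defer lock.Release(1)
	fmt.Println("critical section: at most one holder at a time")
}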
-func newBearerTokenFromHTTPResponseBody(res *http.Response) (*bearerToken, error) { +func (bt *bearerToken) readFromHTTPResponseBody(res *http.Response) error { blob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize) if err != nil { - return nil, err + return err } var token struct { @@ -881,12 +937,10 @@ func newBearerTokenFromHTTPResponseBody(res *http.Response) (*bearerToken, error if len(bodySample) > bodySampleLength { bodySample = bodySample[:bodySampleLength] } - return nil, fmt.Errorf("decoding bearer token (last URL %q, body start %q): %w", res.Request.URL.Redacted(), string(bodySample), err) + return fmt.Errorf("decoding bearer token (last URL %q, body start %q): %w", res.Request.URL.Redacted(), string(bodySample), err) } - bt := &bearerToken{ - token: token.Token, - } + bt.token = token.Token if bt.token == "" { bt.token = token.AccessToken } @@ -899,7 +953,7 @@ func newBearerTokenFromHTTPResponseBody(res *http.Response) (*bearerToken, error token.IssuedAt = time.Now().UTC() } bt.expirationTime = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second) - return bt, nil + return nil } // detectPropertiesHelper performs the work of detectProperties which executes diff --git a/vendor/go.podman.io/image/v5/version/version.go b/vendor/go.podman.io/image/v5/version/version.go index 71a957fc689..e2dd251d2fa 100644 --- a/vendor/go.podman.io/image/v5/version/version.go +++ b/vendor/go.podman.io/image/v5/version/version.go @@ -6,12 +6,12 @@ const ( // VersionMajor is for an API incompatible changes VersionMajor = 5 // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 38 + VersionMinor = 39 // VersionPatch is for backwards-compatible bug fixes VersionPatch = 0 // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "" + VersionDev = "-dev" ) // Version is the specification version that the package types support. diff --git a/vendor/go.podman.io/storage/.golangci.yml b/vendor/go.podman.io/storage/.golangci.yml index ad4dec0c162..7c367815be8 100644 --- a/vendor/go.podman.io/storage/.golangci.yml +++ b/vendor/go.podman.io/storage/.golangci.yml @@ -18,3 +18,7 @@ linters: - all - -ST1003 # https://staticcheck.dev/docs/checks/#ST1003 Poorly chosen identifier. - -QF1008 # https://staticcheck.dev/docs/checks/#QF1008 Omit embedded fields from selector expression. 
+
+issues:
+  max-issues-per-linter: 0
+  max-same-issues: 0
diff --git a/vendor/go.podman.io/storage/VERSION b/vendor/go.podman.io/storage/VERSION
index 91951fd8ad7..bb55a51f64d 100644
--- a/vendor/go.podman.io/storage/VERSION
+++ b/vendor/go.podman.io/storage/VERSION
@@ -1 +1 @@
-1.61.0
+1.62.0-dev
diff --git a/vendor/go.podman.io/storage/drivers/zfs/zfs.go b/vendor/go.podman.io/storage/drivers/zfs/zfs.go
index 8660dbaa3e8..b994278bb2f 100644
--- a/vendor/go.podman.io/storage/drivers/zfs/zfs.go
+++ b/vendor/go.podman.io/storage/drivers/zfs/zfs.go
@@ -12,7 +12,7 @@ import (
 	"sync"
 	"time"
 
-	zfs "github.com/mistifyio/go-zfs/v3"
+	zfs "github.com/mistifyio/go-zfs/v4"
 	"github.com/opencontainers/selinux/go-selinux/label"
 	"github.com/sirupsen/logrus"
 	graphdriver "go.podman.io/storage/drivers"
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 0dad969b320..a7fc75b823a 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -56,8 +56,8 @@ github.com/containerd/log
 # github.com/containerd/platforms v1.0.0-rc.1
 ## explicit; go 1.20
 github.com/containerd/platforms
-# github.com/containerd/stargz-snapshotter/estargz v0.17.0
-## explicit; go 1.23.0
+# github.com/containerd/stargz-snapshotter/estargz v0.18.0
+## explicit; go 1.24.0
 github.com/containerd/stargz-snapshotter/estargz
 github.com/containerd/stargz-snapshotter/estargz/errorutil
 # github.com/containerd/typeurl/v2 v2.2.3
@@ -369,8 +369,8 @@ github.com/json-iterator/go
 # github.com/kevinburke/ssh_config v1.4.0
 ## explicit; go 1.18
 github.com/kevinburke/ssh_config
-# github.com/klauspost/compress v1.18.0
-## explicit; go 1.22
+# github.com/klauspost/compress v1.18.1
+## explicit; go 1.23
 github.com/klauspost/compress
 github.com/klauspost/compress/flate
 github.com/klauspost/compress/fse
@@ -423,9 +423,9 @@ github.com/mdlayher/vsock
 # github.com/miekg/pkcs11 v1.1.1
 ## explicit; go 1.12
 github.com/miekg/pkcs11
-# github.com/mistifyio/go-zfs/v3 v3.1.0
+# github.com/mistifyio/go-zfs/v4 v4.0.0
 ## explicit; go 1.14
-github.com/mistifyio/go-zfs/v3
+github.com/mistifyio/go-zfs/v4
 # github.com/moby/buildkit v0.25.1
 ## explicit; go 1.24.0
 github.com/moby/buildkit/frontend/dockerfile/command
@@ -563,8 +563,8 @@ github.com/openshift/imagebuilder/strslice
 # github.com/pkg/errors v0.9.1
 ## explicit
 github.com/pkg/errors
-# github.com/pkg/sftp v1.13.9
-## explicit; go 1.15
+# github.com/pkg/sftp v1.13.10
+## explicit; go 1.23.0
 github.com/pkg/sftp
 github.com/pkg/sftp/internal/encoding/ssh/filexfer
 github.com/pkg/sftp/internal/encoding/ssh/filexfer/openssh
@@ -681,7 +681,7 @@ github.com/ulikunitz/xz
 github.com/ulikunitz/xz/internal/hash
 github.com/ulikunitz/xz/internal/xlog
 github.com/ulikunitz/xz/lzma
-# github.com/vbatts/tar-split v0.12.1
+# github.com/vbatts/tar-split v0.12.2
 ## explicit; go 1.17
 github.com/vbatts/tar-split/archive/tar
 github.com/vbatts/tar-split/tar/asm
@@ -741,7 +741,7 @@ go.opentelemetry.io/otel/trace
 go.opentelemetry.io/otel/trace/embedded
 go.opentelemetry.io/otel/trace/internal/telemetry
 go.opentelemetry.io/otel/trace/noop
-# go.podman.io/common v0.66.0
+# go.podman.io/common v0.66.1-0.20251105205746-f394a8a49d5c
 ## explicit; go 1.24.2
 go.podman.io/common/internal
 go.podman.io/common/internal/attributedstring
@@ -811,7 +811,7 @@ go.podman.io/common/pkg/umask
 go.podman.io/common/pkg/util
 go.podman.io/common/pkg/version
 go.podman.io/common/version
-# go.podman.io/image/v5 v5.38.0
+# go.podman.io/image/v5 v5.38.1-0.20251105205746-f394a8a49d5c
 ## explicit; go 1.24.0
 go.podman.io/image/v5/copy
 go.podman.io/image/v5/directory
@@ -885,7 +885,7 @@ go.podman.io/image/v5/transports
 go.podman.io/image/v5/transports/alltransports
 go.podman.io/image/v5/types
 go.podman.io/image/v5/version
-# go.podman.io/storage v1.61.0
+# go.podman.io/storage v1.61.1-0.20251105205746-f394a8a49d5c
 ## explicit; go 1.24.0
 go.podman.io/storage
 go.podman.io/storage/drivers
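
Reviewer note on the obtainBearerToken change in the vendored docker client above: it combines two locks. c.tokenCacheLock guards only the cache map, while each bearerToken carries a single-slot semaphore that is acquired before the entry is published, so concurrent requests for the same scope wait on that one entry rather than on the whole cache. The following is a minimal, self-contained sketch of that pattern; the names (entry, cache, get, fetch) are illustrative only and are not API from go.podman.io/image.

package main

import (
	"context"
	"fmt"
	"sync"
	"time"

	"golang.org/x/sync/semaphore"
)

// entry plays the role of bearerToken in the diff: its semaphore is held
// while value/err/exp are being written, so readers never observe a
// half-filled entry.
type entry struct {
	lock  *semaphore.Weighted
	value string
	err   error
	exp   time.Time
}

type cache struct {
	mu      sync.Mutex // guards the map itself, not the entries' contents
	entries map[string]*entry
}

// get returns a cached value for key, calling fetch if the entry is missing,
// expired, or left over from a failed attempt.
func (c *cache) get(ctx context.Context, key string, fetch func(context.Context) (string, time.Time, error)) (string, error) {
	e, isNew, err := func() (*entry, bool, error) { // a scope for defer
		c.mu.Lock()
		defer c.mu.Unlock()
		if cached, ok := c.entries[key]; ok {
			return cached, false, nil
		}
		e := &entry{lock: semaphore.NewWeighted(1)}
		// Lock the new entry *before* publishing it, so any other goroutine
		// that finds it blocks until the first fetch completes.
		if err := e.lock.Acquire(ctx, 1); err != nil {
			return nil, false, err
		}
		c.entries[key] = e
		return e, true, nil
	}()
	if err != nil {
		return "", err
	}
	if !isNew {
		// Wait on this entry's lock only, after c.mu is released, so callers
		// using other keys are never stalled behind our fetch.
		if err := e.lock.Acquire(ctx, 1); err != nil {
			return "", err
		}
	}
	defer e.lock.Release(1)

	if !isNew && e.err == nil && time.Now().Before(e.exp) {
		return e.value, nil // still valid; no round trip needed
	}
	e.value, e.exp, e.err = fetch(ctx)
	if e.err != nil {
		return "", e.err
	}
	return e.value, nil
}

func main() {
	c := &cache{entries: map[string]*entry{}}
	tok, err := c.get(context.Background(), "repository:busybox:pull",
		func(context.Context) (string, time.Time, error) {
			return "example-token", time.Now().Add(time.Minute), nil
		})
	fmt.Println(tok, err)
}

As in the vendored change, a failed fetch does not poison the cache: the fast-path check requires err == nil, so the next caller for that key simply retries while callers for other keys proceed unimpeded.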
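
The parsing rules visible in readFromHTTPResponseBody follow the distribution token spec: "token" and "access_token" are interchangeable with "token" preferred, and a missing issued_at defaults to the current time before expires_in is added. A sketch of those rules, assuming upstream's minimum token lifetime of 60 seconds (that constant is defined outside this diff); the tokenResponse struct is illustrative:

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// tokenResponse mirrors the fields the vendored parser reads from the
// registry's JSON body.
type tokenResponse struct {
	Token       string    `json:"token"`
	AccessToken string    `json:"access_token"`
	ExpiresIn   int       `json:"expires_in"`
	IssuedAt    time.Time `json:"issued_at"`
}

func main() {
	blob := []byte(`{"access_token": "abc", "expires_in": 5}`)
	var tr tokenResponse
	if err := json.Unmarshal(blob, &tr); err != nil {
		panic(err)
	}
	token := tr.Token
	if token == "" {
		token = tr.AccessToken // "token" wins when both are present
	}
	if tr.ExpiresIn < 60 {
		tr.ExpiresIn = 60 // assumed minimum lifetime, per upstream's constant
	}
	if tr.IssuedAt.IsZero() {
		tr.IssuedAt = time.Now().UTC() // missing issued_at defaults to "now"
	}
	fmt.Println(token, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn)*time.Second))
}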