diff --git a/go.mod b/go.mod
index 89ef788e8a..6b0666140a 100644
--- a/go.mod
+++ b/go.mod
@@ -4,7 +4,6 @@ go 1.24.4
 
 require (
 	github.com/blang/semver/v4 v4.0.0
-	github.com/containers/image/v5 v5.36.2
 	github.com/coreos/go-semver v0.3.1
 	github.com/distribution/reference v0.6.0
 	github.com/evanphx/json-patch v5.9.11+incompatible
@@ -33,6 +32,7 @@ require (
 	github.com/spf13/cobra v1.10.1
 	github.com/spf13/pflag v1.0.10
 	github.com/stretchr/testify v1.11.1
+	go.podman.io/image/v5 v5.37.0
 	golang.org/x/net v0.44.0
 	golang.org/x/sync v0.17.0
 	golang.org/x/time v0.13.0
@@ -77,7 +77,6 @@ require (
 	github.com/containerd/typeurl/v2 v2.2.3 // indirect
 	github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 // indirect
 	github.com/containers/ocicrypt v1.2.1 // indirect
-	github.com/containers/storage v1.59.1 // indirect
 	github.com/coreos/go-systemd/v22 v22.6.0 // indirect
 	github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
 	github.com/docker/cli v28.4.0+incompatible // indirect
@@ -155,7 +154,6 @@ require (
 	go.opentelemetry.io/otel/trace v1.37.0 // indirect
 	go.opentelemetry.io/proto/otlp v1.7.0 // indirect
 	go.podman.io/common v0.65.0 // indirect
-	go.podman.io/image/v5 v5.37.0 // indirect
 	go.podman.io/storage v1.60.0 // indirect
 	go.uber.org/automaxprocs v1.6.0 // indirect
 	go.uber.org/multierr v1.11.0 // indirect
diff --git a/go.sum b/go.sum
index f3ca084271..82366be3be 100644
--- a/go.sum
+++ b/go.sum
@@ -1405,14 +1405,10 @@ github.com/containerd/ttrpc v1.2.7 h1:qIrroQvuOL9HQ1X6KHe2ohc7p+HP/0VE6XPU7elJRq
 github.com/containerd/ttrpc v1.2.7/go.mod h1:YCXHsb32f+Sq5/72xHubdiJRQY9inL4a4ZQrAbN1q9o=
 github.com/containerd/typeurl/v2 v2.2.3 h1:yNA/94zxWdvYACdYO8zofhrTVuQY73fFU1y++dYSw40=
 github.com/containerd/typeurl/v2 v2.2.3/go.mod h1:95ljDnPfD3bAbDJRugOiShd/DlAAsxGtUBhJxIn7SCk=
-github.com/containers/image/v5 v5.36.2 h1:GcxYQyAHRF/pLqR4p4RpvKllnNL8mOBn0eZnqJbfTwk=
-github.com/containers/image/v5 v5.36.2/go.mod h1:b4GMKH2z/5t6/09utbse2ZiLK/c72GuGLFdp7K69eA4=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01 h1:Qzk5C6cYglewc+UyGf6lc8Mj2UaPTHy/iF2De0/77CA=
 github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01/go.mod h1:9rfv8iPl1ZP7aqh9YA68wnZv2NUDbXdcdPHVz0pFbPY=
 github.com/containers/ocicrypt v1.2.1 h1:0qIOTT9DoYwcKmxSt8QJt+VzMY18onl9jUXsxpVhSmM=
 github.com/containers/ocicrypt v1.2.1/go.mod h1:aD0AAqfMp0MtwqWgHM1bUwe1anx0VazI108CRrSKINQ=
-github.com/containers/storage v1.59.1 h1:11Zu68MXsEQGBBd+GadPrHPpWeqjKS8hJDGiAHgIqDs=
-github.com/containers/storage v1.59.1/go.mod h1:KoAYHnAjP3/cTsRS+mmWZGkufSY2GACiKQ4V3ZLQnR0=
 github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
 github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
 github.com/coreos/go-systemd/v22 v22.6.0 h1:aGVa/v8B7hpb0TKl0MWoAavPDmHvobFe5R5zn0bCJWo=
diff --git a/util/image-canonical-ref/main.go b/util/image-canonical-ref/main.go
index 9639d50a11..6558e6bb7a 100644
--- a/util/image-canonical-ref/main.go
+++ b/util/image-canonical-ref/main.go
@@ -5,10 +5,10 @@ import (
 	"fmt"
 	"os"
 
-	"github.com/containers/image/v5/docker"
-	"github.com/containers/image/v5/docker/reference"
-	"github.com/containers/image/v5/manifest"
-	"github.com/containers/image/v5/types"
+	"go.podman.io/image/v5/docker"
+	"go.podman.io/image/v5/docker/reference"
+	"go.podman.io/image/v5/manifest"
+	"go.podman.io/image/v5/types"
 )
 
 // This is a simple tool to resolve canonical reference of the image.
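The first-party change is confined to go.mod/go.sum and the import-path rewrite in util/image-canonical-ref: containers/image and containers/storage now resolve via the go.podman.io/image/v5 and go.podman.io/storage module paths, and the vendor/ deletions below are the mechanical fallout of that rename. For orientation, here is a minimal sketch of the kind of canonical-reference resolution the tool performs, written against the new import path. This is illustrative only, not the tool's actual code: resolveCanonical is a hypothetical helper, and authenticated registries would need a configured *types.SystemContext instead of nil.

// Illustrative sketch only — not the vendored tool's code.
package main

import (
	"context"
	"fmt"
	"os"

	"go.podman.io/image/v5/docker"
	"go.podman.io/image/v5/docker/reference"
	"go.podman.io/image/v5/manifest"
	"go.podman.io/image/v5/types"
)

// resolveCanonical (hypothetical) resolves name to a digest-pinned reference
// by fetching the manifest via the docker:// transport and digesting it.
func resolveCanonical(ctx context.Context, sys *types.SystemContext, name string) (reference.Canonical, error) {
	named, err := reference.ParseNormalizedNamed(name)
	if err != nil {
		return nil, fmt.Errorf("parsing %q: %w", name, err)
	}
	// docker.ParseReference expects the "//name[:tag]" form used by the docker:// transport.
	ref, err := docker.ParseReference("//" + named.String())
	if err != nil {
		return nil, err
	}
	src, err := ref.NewImageSource(ctx, sys)
	if err != nil {
		return nil, err
	}
	defer src.Close()
	manblob, _, err := src.GetManifest(ctx, nil) // nil instance digest: the manifest the reference points at
	if err != nil {
		return nil, err
	}
	dgst, err := manifest.Digest(manblob)
	if err != nil {
		return nil, err
	}
	return reference.WithDigest(named, dgst)
}

func main() {
	canonical, err := resolveCanonical(context.Background(), nil, os.Args[1])
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(canonical.String())
}

Run against an image name, a sketch like this would print a digest-pinned form such as name@sha256:…, which is exactly what the module rename leaves unchanged: the packages and their APIs are identical, only the import prefix moves.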
diff --git a/vendor/github.com/containers/image/v5/LICENSE b/vendor/github.com/containers/image/v5/LICENSE deleted file mode 100644 index 9535635306..0000000000 --- a/vendor/github.com/containers/image/v5/LICENSE +++ /dev/null @@ -1,189 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/containers/image/v5/docker/body_reader.go b/vendor/github.com/containers/image/v5/docker/body_reader.go deleted file mode 100644 index 3c612f2688..0000000000 --- a/vendor/github.com/containers/image/v5/docker/body_reader.go +++ /dev/null @@ -1,253 +0,0 @@ -package docker - -import ( - "context" - "errors" - "fmt" - "io" - "math" - "math/rand/v2" - "net/http" - "net/url" - "strconv" - "strings" - "syscall" - "time" - - "github.com/sirupsen/logrus" -) - -const ( - // bodyReaderMinimumProgress is the minimum progress we consider a good reason to retry - bodyReaderMinimumProgress = 1 * 1024 * 1024 - // bodyReaderMSSinceLastRetry is the minimum time since a last retry we consider a good reason to retry - bodyReaderMSSinceLastRetry = 60 * 1_000 -) - -// bodyReader is an io.ReadCloser returned by dockerImageSource.GetBlob, -// which can transparently resume some (very limited) kinds of aborted connections. -type bodyReader struct { - ctx context.Context - c *dockerClient - path string // path to pass to makeRequest to retry - logURL *url.URL // a string to use in error messages - firstConnectionTime time.Time - - body io.ReadCloser // The currently open connection we use to read data, or nil if there is nothing to read from / close. - lastRetryOffset int64 // -1 if N/A - lastRetryTime time.Time // IsZero() if N/A - offset int64 // Current offset within the blob - lastSuccessTime time.Time // IsZero() if N/A -} - -// newBodyReader creates a bodyReader for request path in c. -// firstBody is an already correctly opened body for the blob, returning the full blob from the start. -// If reading from firstBody fails, bodyReader may heuristically decide to resume. -func newBodyReader(ctx context.Context, c *dockerClient, path string, firstBody io.ReadCloser) (io.ReadCloser, error) { - logURL, err := c.resolveRequestURL(path) - if err != nil { - return nil, err - } - res := &bodyReader{ - ctx: ctx, - c: c, - path: path, - logURL: logURL, - firstConnectionTime: time.Now(), - - body: firstBody, - lastRetryOffset: -1, - lastRetryTime: time.Time{}, - offset: 0, - lastSuccessTime: time.Time{}, - } - return res, nil -} - -// parseDecimalInString ensures that s[start:] starts with a non-negative decimal number, and returns that number and the offset after the number. -func parseDecimalInString(s string, start int) (int64, int, error) { - i := start - for i < len(s) && s[i] >= '0' && s[i] <= '9' { - i++ - } - if i == start { - return -1, -1, errors.New("missing decimal number") - } - v, err := strconv.ParseInt(s[start:i], 10, 64) - if err != nil { - return -1, -1, fmt.Errorf("parsing number: %w", err) - } - return v, i, nil -} - -// parseExpectedChar ensures that s[pos] is the expected byte, and returns the offset after it. -func parseExpectedChar(s string, pos int, expected byte) (int, error) { - if pos == len(s) || s[pos] != expected { - return -1, fmt.Errorf("missing expected %q", expected) - } - return pos + 1, nil -} - -// parseContentRange ensures that res contains a Content-Range header with a byte range, and returns (first, last, completeLength) on success. Size can be -1. 
-func parseContentRange(res *http.Response) (int64, int64, int64, error) { - hdrs := res.Header.Values("Content-Range") - switch len(hdrs) { - case 0: - return -1, -1, -1, errors.New("missing Content-Range: header") - case 1: - break - default: - return -1, -1, -1, fmt.Errorf("ambiguous Content-Range:, %d header values", len(hdrs)) - } - hdr := hdrs[0] - expectedPrefix := "bytes " - if !strings.HasPrefix(hdr, expectedPrefix) { - return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, missing prefix %q", hdr, expectedPrefix) - } - first, pos, err := parseDecimalInString(hdr, len(expectedPrefix)) - if err != nil { - return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, parsing first-pos: %w", hdr, err) - } - pos, err = parseExpectedChar(hdr, pos, '-') - if err != nil { - return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q: %w", hdr, err) - } - last, pos, err := parseDecimalInString(hdr, pos) - if err != nil { - return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, parsing last-pos: %w", hdr, err) - } - pos, err = parseExpectedChar(hdr, pos, '/') - if err != nil { - return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q: %w", hdr, err) - } - completeLength := int64(-1) - if pos < len(hdr) && hdr[pos] == '*' { - pos++ - } else { - completeLength, pos, err = parseDecimalInString(hdr, pos) - if err != nil { - return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, parsing complete-length: %w", hdr, err) - } - } - if pos < len(hdr) { - return -1, -1, -1, fmt.Errorf("invalid Content-Range: %q, unexpected trailing content", hdr) - } - return first, last, completeLength, nil -} - -// Read implements io.ReadCloser -func (br *bodyReader) Read(p []byte) (int, error) { - if br.body == nil { - return 0, fmt.Errorf("internal error: bodyReader.Read called on a closed object for %s", br.logURL.Redacted()) - } - n, err := br.body.Read(p) - br.offset += int64(n) - switch { - case err == nil || err == io.EOF: - br.lastSuccessTime = time.Now() - return n, err // Unlike the default: case, don’t log anything. - - case errors.Is(err, io.ErrUnexpectedEOF) || errors.Is(err, syscall.ECONNRESET): - originalErr := err - redactedURL := br.logURL.Redacted() - if err := br.errorIfNotReconnecting(originalErr, redactedURL); err != nil { - return n, err - } - - if err := br.body.Close(); err != nil { - logrus.Debugf("Error closing blob body: %v", err) // … and ignore err otherwise - } - br.body = nil - time.Sleep(1*time.Second + rand.N(100_000*time.Microsecond)) // Some jitter so that a failure blip doesn’t cause a deterministic stampede - - headers := map[string][]string{ - "Range": {fmt.Sprintf("bytes=%d-", br.offset)}, - } - res, err := br.c.makeRequest(br.ctx, http.MethodGet, br.path, headers, nil, v2Auth, nil) - if err != nil { - return n, fmt.Errorf("%w (while reconnecting: %v)", originalErr, err) - } - consumedBody := false - defer func() { - if !consumedBody { - res.Body.Close() - } - }() - switch res.StatusCode { - case http.StatusPartialContent: // OK - // A client MUST inspect a 206 response's Content-Type and Content-Range field(s) to determine what parts are enclosed and whether additional requests are needed. - // The recipient of an invalid Content-Range MUST NOT attempt to recombine the received content with a stored representation. 
- first, last, completeLength, err := parseContentRange(res) - if err != nil { - return n, fmt.Errorf("%w (after reconnecting, invalid Content-Range header: %v)", originalErr, err) - } - // We don’t handle responses that start at an unrequested offset, nor responses that terminate before the end of the full blob. - if first != br.offset || (completeLength != -1 && last+1 != completeLength) { - return n, fmt.Errorf("%w (after reconnecting at offset %d, got unexpected Content-Range %d-%d/%d)", originalErr, br.offset, first, last, completeLength) - } - // Continue below - case http.StatusOK: - return n, fmt.Errorf("%w (after reconnecting, server did not process a Range: header, status %d)", originalErr, http.StatusOK) - default: - err := registryHTTPResponseToError(res) - return n, fmt.Errorf("%w (after reconnecting, fetching blob: %v)", originalErr, err) - } - - logrus.Debugf("Successfully reconnected to %s", redactedURL) - consumedBody = true - br.body = res.Body - br.lastRetryOffset = br.offset - br.lastRetryTime = time.Now() - return n, nil - - default: - logrus.Debugf("Error reading blob body from %s: %#v", br.logURL.Redacted(), err) - return n, err - } -} - -// millisecondsSinceOptional is like currentTime.Sub(tm).Milliseconds, but it returns a floating-point value. -// If tm.IsZero(), it returns math.NaN() -func millisecondsSinceOptional(currentTime time.Time, tm time.Time) float64 { - if tm.IsZero() { - return math.NaN() - } - return float64(currentTime.Sub(tm).Nanoseconds()) / 1_000_000.0 -} - -// errorIfNotReconnecting makes a heuristic decision whether we should reconnect after err at redactedURL; if so, it returns nil, -// otherwise it returns an appropriate error to return to the caller (possibly augmented with data about the heuristic) -func (br *bodyReader) errorIfNotReconnecting(originalErr error, redactedURL string) error { - currentTime := time.Now() - msSinceFirstConnection := millisecondsSinceOptional(currentTime, br.firstConnectionTime) - msSinceLastRetry := millisecondsSinceOptional(currentTime, br.lastRetryTime) - msSinceLastSuccess := millisecondsSinceOptional(currentTime, br.lastSuccessTime) - logrus.Debugf("Reading blob body from %s failed (%#v), decision inputs: total %d @%.3f ms, last retry %d @%.3f ms, last progress @%.3f ms", - redactedURL, originalErr, br.offset, msSinceFirstConnection, br.lastRetryOffset, msSinceLastRetry, msSinceLastSuccess) - progress := br.offset - br.lastRetryOffset - if progress >= bodyReaderMinimumProgress { - logrus.Infof("Reading blob body from %s failed (%v), reconnecting after %d bytes…", redactedURL, originalErr, progress) - return nil - } - if br.lastRetryTime.IsZero() { - logrus.Infof("Reading blob body from %s failed (%v), reconnecting (first reconnection)…", redactedURL, originalErr) - return nil - } - if msSinceLastRetry >= bodyReaderMSSinceLastRetry { - logrus.Infof("Reading blob body from %s failed (%v), reconnecting after %.3f ms…", redactedURL, originalErr, msSinceLastRetry) - return nil - } - logrus.Debugf("Not reconnecting to %s: insufficient progress %d / time since last retry %.3f ms", redactedURL, progress, msSinceLastRetry) - return fmt.Errorf("(heuristic tuning data: total %d @%.3f ms, last retry %d @%.3f ms, last progress @ %.3f ms): %w", - br.offset, msSinceFirstConnection, br.lastRetryOffset, msSinceLastRetry, msSinceLastSuccess, originalErr) -} - -// Close implements io.ReadCloser -func (br *bodyReader) Close() error { - if br.body == nil { - return nil - } - err := br.body.Close() - br.body = nil - return err -} 
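The bodyReader deleted above resumes interrupted blob reads: when a read dies mid-stream (unexpected EOF or ECONNRESET) and the heuristic in errorIfNotReconnecting approves, it re-requests the blob with Range: bytes=<offset>- and splices in the new connection only if the server answers 206 with a Content-Range beginning exactly at the current offset — a plain 200 would silently restart the blob from byte 0 and corrupt the already-read prefix. A stdlib-only restatement of that check follows; resumeAt is a hypothetical helper, and the vendored parseContentRange is stricter (it also accepts a "*" complete-length), so treat this as a sketch of the contract, not equivalent code.

// Hypothetical restatement of the resume contract; not the vendored code.
package blobresume

import (
	"fmt"
	"io"
	"net/http"
)

// resumeAt re-opens blobURL at offset and returns the new body only if the
// server actually honored the Range request.
func resumeAt(client *http.Client, blobURL string, offset int64) (io.ReadCloser, error) {
	req, err := http.NewRequest(http.MethodGet, blobURL, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
	res, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	if res.StatusCode != http.StatusPartialContent {
		res.Body.Close()
		// A 200 means the server ignored Range: and restarted from byte 0;
		// splicing that onto already-read data would corrupt the blob.
		return nil, fmt.Errorf("server did not honor Range:, status %d", res.StatusCode)
	}
	var first, last, complete int64
	// Scanning the common "bytes first-last/complete" form is enough for a sketch.
	hdr := res.Header.Get("Content-Range")
	if _, err := fmt.Sscanf(hdr, "bytes %d-%d/%d", &first, &last, &complete); err != nil || first != offset {
		res.Body.Close()
		return nil, fmt.Errorf("unusable Content-Range %q for offset %d", hdr, offset)
	}
	return res.Body, nil
}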
diff --git a/vendor/github.com/containers/image/v5/docker/cache.go b/vendor/github.com/containers/image/v5/docker/cache.go deleted file mode 100644 index 728d32d170..0000000000 --- a/vendor/github.com/containers/image/v5/docker/cache.go +++ /dev/null @@ -1,23 +0,0 @@ -package docker - -import ( - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/types" -) - -// bicTransportScope returns a BICTransportScope appropriate for ref. -func bicTransportScope(ref dockerReference) types.BICTransportScope { - // Blobs can be reused across the whole registry. - return types.BICTransportScope{Opaque: reference.Domain(ref.ref)} -} - -// newBICLocationReference returns a BICLocationReference appropriate for ref. -func newBICLocationReference(ref dockerReference) types.BICLocationReference { - // Blobs are scoped to repositories (the tag/digest are not necessary to reuse a blob). - return types.BICLocationReference{Opaque: ref.ref.Name()} -} - -// parseBICLocationReference returns a repository for encoded lr. -func parseBICLocationReference(lr types.BICLocationReference) (reference.Named, error) { - return reference.ParseNormalizedNamed(lr.Opaque) -} diff --git a/vendor/github.com/containers/image/v5/docker/distribution_error.go b/vendor/github.com/containers/image/v5/docker/distribution_error.go deleted file mode 100644 index 06a9593dcd..0000000000 --- a/vendor/github.com/containers/image/v5/docker/distribution_error.go +++ /dev/null @@ -1,161 +0,0 @@ -// Code below is taken from https://github.com/distribution/distribution/blob/a4d9db5a884b70be0c96dd6a7a9dbef4f2798c51/registry/client/errors.go -// Copyright 2022 github.com/distribution/distribution authors. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package docker - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "slices" - - "github.com/docker/distribution/registry/api/errcode" -) - -// errNoErrorsInBody is returned when an HTTP response body parses to an empty -// errcode.Errors slice. -var errNoErrorsInBody = errors.New("no error details found in HTTP response body") - -// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is -// returned when making a registry api call. -type UnexpectedHTTPStatusError struct { - // StatusCode code as returned from the server, so callers can - // match the exact code to make certain decisions if needed. - StatusCode int - // status text as displayed in the error message, not exposed as callers should match the number. - status string -} - -func (e UnexpectedHTTPStatusError) Error() string { - return fmt.Sprintf("received unexpected HTTP status: %s", e.status) -} - -func newUnexpectedHTTPStatusError(resp *http.Response) UnexpectedHTTPStatusError { - return UnexpectedHTTPStatusError{ - StatusCode: resp.StatusCode, - status: resp.Status, - } -} - -// unexpectedHTTPResponseError is returned when an expected HTTP status code -// is returned, but the content was unexpected and failed to be parsed. 
-type unexpectedHTTPResponseError struct { - ParseErr error - StatusCode int - Response []byte -} - -func (e *unexpectedHTTPResponseError) Error() string { - return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response)) -} - -func parseHTTPErrorResponse(statusCode int, r io.Reader) error { - var errors errcode.Errors - body, err := io.ReadAll(r) - if err != nil { - return err - } - - // For backward compatibility, handle irregularly formatted - // messages that contain a "details" field. - var detailsErr struct { - Details string `json:"details"` - } - err = json.Unmarshal(body, &detailsErr) - if err == nil && detailsErr.Details != "" { - switch statusCode { - case http.StatusUnauthorized: - return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details) - case http.StatusTooManyRequests: - return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details) - default: - return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details) - } - } - - if err := json.Unmarshal(body, &errors); err != nil { - return &unexpectedHTTPResponseError{ - ParseErr: err, - StatusCode: statusCode, - Response: body, - } - } - - if len(errors) == 0 { - // If there was no error specified in the body, return - // UnexpectedHTTPResponseError. - return &unexpectedHTTPResponseError{ - ParseErr: errNoErrorsInBody, - StatusCode: statusCode, - Response: body, - } - } - - return errors -} - -func makeErrorList(err error) []error { - if errL, ok := err.(errcode.Errors); ok { - return []error(errL) - } - return []error{err} -} - -func mergeErrors(err1, err2 error) error { - return errcode.Errors(append(slices.Clone(makeErrorList(err1)), makeErrorList(err2)...)) -} - -// handleErrorResponse returns error parsed from HTTP response for an -// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An -// UnexpectedHTTPStatusError returned for response code outside of expected -// range. 
-func handleErrorResponse(resp *http.Response) error { - switch { - case resp.StatusCode == http.StatusUnauthorized: - // Check for OAuth errors within the `WWW-Authenticate` header first - // See https://tools.ietf.org/html/rfc6750#section-3 - for c := range iterateAuthHeader(resp.Header) { - if c.Scheme == "bearer" { - var err errcode.Error - // codes defined at https://tools.ietf.org/html/rfc6750#section-3.1 - switch c.Parameters["error"] { - case "invalid_token": - err.Code = errcode.ErrorCodeUnauthorized - case "insufficient_scope": - err.Code = errcode.ErrorCodeDenied - default: - continue - } - if description := c.Parameters["error_description"]; description != "" { - err.Message = description - } else { - err.Message = err.Code.Message() - } - - return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body)) - } - } - fallthrough - case resp.StatusCode >= 400 && resp.StatusCode < 500: - err := parseHTTPErrorResponse(resp.StatusCode, resp.Body) - if uErr, ok := err.(*unexpectedHTTPResponseError); ok && resp.StatusCode == 401 { - return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response) - } - return err - } - return newUnexpectedHTTPStatusError(resp) -} diff --git a/vendor/github.com/containers/image/v5/docker/docker_client.go b/vendor/github.com/containers/image/v5/docker/docker_client.go deleted file mode 100644 index 851d3e082d..0000000000 --- a/vendor/github.com/containers/image/v5/docker/docker_client.go +++ /dev/null @@ -1,1209 +0,0 @@ -package docker - -import ( - "context" - "crypto/tls" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "os" - "path/filepath" - "slices" - "strconv" - "strings" - "sync" - "time" - - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/internal/iolimits" - "github.com/containers/image/v5/internal/multierr" - "github.com/containers/image/v5/internal/set" - "github.com/containers/image/v5/internal/useragent" - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/pkg/docker/config" - "github.com/containers/image/v5/pkg/sysregistriesv2" - "github.com/containers/image/v5/pkg/tlsclientconfig" - "github.com/containers/image/v5/types" - "github.com/containers/storage/pkg/fileutils" - "github.com/containers/storage/pkg/homedir" - "github.com/docker/distribution/registry/api/errcode" - v2 "github.com/docker/distribution/registry/api/v2" - "github.com/docker/go-connections/tlsconfig" - digest "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" -) - -const ( - dockerHostname = "docker.io" - dockerV1Hostname = "index.docker.io" - dockerRegistry = "registry-1.docker.io" - - resolvedPingV2URL = "%s://%s/v2/" - tagsPath = "/v2/%s/tags/list" - manifestPath = "/v2/%s/manifests/%s" - blobsPath = "/v2/%s/blobs/%s" - blobUploadPath = "/v2/%s/blobs/uploads/" - extensionsSignaturePath = "/extensions/v2/%s/signatures/%s" - - minimumTokenLifetimeSeconds = 60 - - extensionSignatureSchemaVersion = 2 // extensionSignature.Version - extensionSignatureTypeAtomic = "atomic" // extensionSignature.Type - - backoffNumIterations = 5 - backoffInitialDelay = 2 * time.Second - backoffMaxDelay = 60 * time.Second -) - -type certPath struct { - path string - absolute bool -} - -var ( - homeCertDir = filepath.FromSlash(".config/containers/certs.d") - perHostCertDirs = []certPath{ - {path: etcDir + "/containers/certs.d", absolute: true}, - {path: etcDir + "/docker/certs.d", absolute: true}, - } -) - -// 
extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go: -// signature represents a Docker image signature. -type extensionSignature struct { - Version int `json:"schemaVersion"` // Version specifies the schema version - Name string `json:"name"` // Name must be in "sha256:@signatureName" format - Type string `json:"type"` // Type is optional, of not set it will be defaulted to "AtomicImageV1" - Content []byte `json:"content"` // Content contains the signature -} - -// signatureList represents list of Docker image signatures. -type extensionSignatureList struct { - Signatures []extensionSignature `json:"signatures"` -} - -// bearerToken records a cached token we can use to authenticate. -type bearerToken struct { - token string - expirationTime time.Time -} - -// dockerClient is configuration for dealing with a single container registry. -type dockerClient struct { - // The following members are set by newDockerClient and do not change afterwards. - sys *types.SystemContext - registry string - userAgent string - - // tlsClientConfig is setup by newDockerClient and will be used and updated - // by detectProperties(). Callers can edit tlsClientConfig.InsecureSkipVerify in the meantime. - tlsClientConfig *tls.Config - // The following members are not set by newDockerClient and must be set by callers if needed. - auth types.DockerAuthConfig - registryToken string - signatureBase lookasideStorageBase - useSigstoreAttachments bool - scope authScope - - // The following members are detected registry properties: - // They are set after a successful detectProperties(), and never change afterwards. - client *http.Client - scheme string - challenges []challenge - supportsSignatures bool - - // Private state for setupRequestAuth (key: string, value: bearerToken) - tokenCache sync.Map - // Private state for detectProperties: - detectPropertiesOnce sync.Once // detectPropertiesOnce is used to execute detectProperties() at most once. - detectPropertiesError error // detectPropertiesError caches the initial error. - // Private state for logResponseWarnings - reportedWarningsLock sync.Mutex - reportedWarnings *set.Set[string] -} - -type authScope struct { - resourceType string - remoteName string - actions string -} - -// sendAuth determines whether we need authentication for v2 or v1 endpoint. -type sendAuth int - -const ( - // v2 endpoint with authentication. - v2Auth sendAuth = iota - // v1 endpoint with authentication. - // TODO: Get v1Auth working - // v1Auth - // no authentication, works for both v1 and v2. - noAuth -) - -// dockerCertDir returns a path to a directory to be consumed by tlsclientconfig.SetupCertificates() depending on ctx and hostPort. -func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) { - if sys != nil && sys.DockerCertPath != "" { - return sys.DockerCertPath, nil - } - if sys != nil && sys.DockerPerHostCertDirPath != "" { - return filepath.Join(sys.DockerPerHostCertDirPath, hostPort), nil - } - - var ( - hostCertDir string - fullCertDirPath string - ) - - for _, perHostCertDir := range append([]certPath{{path: filepath.Join(homedir.Get(), homeCertDir), absolute: false}}, perHostCertDirs...) 
{ - if sys != nil && sys.RootForImplicitAbsolutePaths != "" && perHostCertDir.absolute { - hostCertDir = filepath.Join(sys.RootForImplicitAbsolutePaths, perHostCertDir.path) - } else { - hostCertDir = perHostCertDir.path - } - - fullCertDirPath = filepath.Join(hostCertDir, hostPort) - err := fileutils.Exists(fullCertDirPath) - if err == nil { - break - } - if os.IsNotExist(err) { - continue - } - if os.IsPermission(err) { - logrus.Debugf("error accessing certs directory due to permissions: %v", err) - continue - } - return "", err - } - return fullCertDirPath, nil -} - -// newDockerClientFromRef returns a new dockerClient instance for refHostname (a host a specified in the Docker image reference, not canonicalized to dockerRegistry) -// “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection) -// signatureBase is always set in the return value -// The caller must call .Close() on the returned client when done. -func newDockerClientFromRef(sys *types.SystemContext, ref dockerReference, registryConfig *registryConfiguration, write bool, actions string) (*dockerClient, error) { - auth, err := config.GetCredentialsForRef(sys, ref.ref) - if err != nil { - return nil, fmt.Errorf("getting username and password: %w", err) - } - - sigBase, err := registryConfig.lookasideStorageBaseURL(ref, write) - if err != nil { - return nil, err - } - - registry := reference.Domain(ref.ref) - client, err := newDockerClient(sys, registry, ref.ref.Name()) - if err != nil { - return nil, err - } - client.auth = auth - if sys != nil { - client.registryToken = sys.DockerBearerRegistryToken - } - client.signatureBase = sigBase - client.useSigstoreAttachments = registryConfig.useSigstoreAttachments(ref) - client.scope.resourceType = "repository" - client.scope.actions = actions - client.scope.remoteName = reference.Path(ref.ref) - return client, nil -} - -// newDockerClient returns a new dockerClient instance for the given registry -// and reference. The reference is used to query the registry configuration -// and can either be a registry (e.g, "registry.com[:5000]"), a repository -// (e.g., "registry.com[:5000][/some/namespace]/repo"). -// Please note that newDockerClient does not set all members of dockerClient -// (e.g., username and password); those must be set by callers if necessary. -// The caller must call .Close() on the returned client when done. -func newDockerClient(sys *types.SystemContext, registry, reference string) (*dockerClient, error) { - hostName := registry - if registry == dockerHostname { - registry = dockerRegistry - } - tlsClientConfig := &tls.Config{ - CipherSuites: tlsconfig.DefaultServerAcceptedCiphers, - } - - // It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry, - // because docker/docker does not read the certs.d subdirectory at all in that case. We use the user-visible - // dockerHostname here, because it is more symmetrical to read the configuration in that case as well, and because - // generally the UI hides the existence of the different dockerRegistry. But note that this behavior is - // undocumented and may change if docker/docker changes. 
- certDir, err := dockerCertDir(sys, hostName) - if err != nil { - return nil, err - } - if err := tlsclientconfig.SetupCertificates(certDir, tlsClientConfig); err != nil { - return nil, err - } - - // Check if TLS verification shall be skipped (default=false) which can - // be specified in the sysregistriesv2 configuration. - skipVerify := false - reg, err := sysregistriesv2.FindRegistry(sys, reference) - if err != nil { - return nil, fmt.Errorf("loading registries: %w", err) - } - if reg != nil { - if reg.Blocked { - return nil, fmt.Errorf("registry %s is blocked in %s or %s", reg.Prefix, sysregistriesv2.ConfigPath(sys), sysregistriesv2.ConfigDirPath(sys)) - } - skipVerify = reg.Insecure - } - tlsClientConfig.InsecureSkipVerify = skipVerify - - userAgent := useragent.DefaultUserAgent - if sys != nil && sys.DockerRegistryUserAgent != "" { - userAgent = sys.DockerRegistryUserAgent - } - - return &dockerClient{ - sys: sys, - registry: registry, - userAgent: userAgent, - tlsClientConfig: tlsClientConfig, - reportedWarnings: set.New[string](), - }, nil -} - -// CheckAuth validates the credentials by attempting to log into the registry -// returns an error if an error occurred while making the http request or the status code received was 401 -func CheckAuth(ctx context.Context, sys *types.SystemContext, username, password, registry string) error { - client, err := newDockerClient(sys, registry, registry) - if err != nil { - return fmt.Errorf("creating new docker client: %w", err) - } - defer client.Close() - client.auth = types.DockerAuthConfig{ - Username: username, - Password: password, - } - - resp, err := client.makeRequest(ctx, http.MethodGet, "/v2/", nil, nil, v2Auth, nil) - if err != nil { - return err - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - err := registryHTTPResponseToError(resp) - if resp.StatusCode == http.StatusUnauthorized { - err = ErrUnauthorizedForCredentials{Err: err} - } - return err - } - return nil -} - -// SearchResult holds the information of each matching image -// It matches the output returned by the v1 endpoint -type SearchResult struct { - Name string `json:"name"` - Description string `json:"description"` - // StarCount states the number of stars the image has - StarCount int `json:"star_count"` - IsTrusted bool `json:"is_trusted"` - // IsAutomated states whether the image is an automated build - IsAutomated bool `json:"is_automated"` - // IsOfficial states whether the image is an official build - IsOfficial bool `json:"is_official"` -} - -// SearchRegistry queries a registry for images that contain "image" in their name -// The limit is the max number of results desired -// Note: The limit value doesn't work with all registries -// for example registry.access.redhat.com returns all the results without limiting it to the limit value -func SearchRegistry(ctx context.Context, sys *types.SystemContext, registry, image string, limit int) ([]SearchResult, error) { - type V2Results struct { - // Repositories holds the results returned by the /v2/_catalog endpoint - Repositories []string `json:"repositories"` - } - type V1Results struct { - // Results holds the results returned by the /v1/search endpoint - Results []SearchResult `json:"results"` - } - v1Res := &V1Results{} - - // Get credentials from authfile for the underlying hostname - // We can't use GetCredentialsForRef here because we want to search the whole registry. 
- auth, err := config.GetCredentials(sys, registry) - if err != nil { - return nil, fmt.Errorf("getting username and password: %w", err) - } - - // The /v2/_catalog endpoint has been disabled for docker.io therefore - // the call made to that endpoint will fail. So using the v1 hostname - // for docker.io for simplicity of implementation and the fact that it - // returns search results. - hostname := registry - if registry == dockerHostname { - hostname = dockerV1Hostname - // A search term of library/foo does not find the library/foo image on the docker.io servers, - // which is surprising - and that Docker is modifying the search term client-side this same way, - // and it seems convenient to do the same thing. - // Read more here: https://github.com/containers/image/pull/2133#issue-1928524334 - image = strings.TrimPrefix(image, "library/") - } - - client, err := newDockerClient(sys, hostname, registry) - if err != nil { - return nil, fmt.Errorf("creating new docker client: %w", err) - } - defer client.Close() - client.auth = auth - if sys != nil { - client.registryToken = sys.DockerBearerRegistryToken - } - - // Only try the v1 search endpoint if the search query is not empty. If it is - // empty skip to the v2 endpoint. - if image != "" { - // set up the query values for the v1 endpoint - u := url.URL{ - Path: "/v1/search", - } - q := u.Query() - q.Set("q", image) - q.Set("n", strconv.Itoa(limit)) - u.RawQuery = q.Encode() - - logrus.Debugf("trying to talk to v1 search endpoint") - resp, err := client.makeRequest(ctx, http.MethodGet, u.String(), nil, nil, noAuth, nil) - if err != nil { - logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, err) - } else { - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - logrus.Debugf("error getting search results from v1 endpoint %q: %v", registry, httpResponseToError(resp, "")) - } else { - if err := json.NewDecoder(resp.Body).Decode(v1Res); err != nil { - return nil, err - } - return v1Res.Results, nil - } - } - } - - logrus.Debugf("trying to talk to v2 search endpoint") - searchRes := []SearchResult{} - path := "/v2/_catalog" - for len(searchRes) < limit { - resp, err := client.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil) - if err != nil { - logrus.Debugf("error getting search results from v2 endpoint %q: %v", registry, err) - return nil, fmt.Errorf("couldn't search registry %q: %w", registry, err) - } - defer resp.Body.Close() - if resp.StatusCode != http.StatusOK { - err := registryHTTPResponseToError(resp) - logrus.Errorf("error getting search results from v2 endpoint %q: %v", registry, err) - return nil, fmt.Errorf("couldn't search registry %q: %w", registry, err) - } - v2Res := &V2Results{} - if err := json.NewDecoder(resp.Body).Decode(v2Res); err != nil { - return nil, err - } - - for _, repo := range v2Res.Repositories { - if len(searchRes) == limit { - break - } - if strings.Contains(repo, image) { - res := SearchResult{ - Name: repo, - } - // bugzilla.redhat.com/show_bug.cgi?id=1976283 - // If we have a full match, make sure it's listed as the first result. - // (Note there might be a full match we never see if we reach the result limit first.) - if repo == image { - searchRes = append([]SearchResult{res}, searchRes...) 
- } else { - searchRes = append(searchRes, res) - } - } - } - - link := resp.Header.Get("Link") - if link == "" { - break - } - linkURLPart, _, _ := strings.Cut(link, ";") - linkURL, err := url.Parse(strings.Trim(linkURLPart, "<>")) - if err != nil { - return searchRes, err - } - - // can be relative or absolute, but we only want the path (and I - // guess we're in trouble if it forwards to a new place...) - path = linkURL.Path - if linkURL.RawQuery != "" { - path += "?" - path += linkURL.RawQuery - } - } - return searchRes, nil -} - -// makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client. -// The host name and schema is taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/. -func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader, auth sendAuth, extraScope *authScope) (*http.Response, error) { - if err := c.detectProperties(ctx); err != nil { - return nil, err - } - - requestURL, err := c.resolveRequestURL(path) - if err != nil { - return nil, err - } - return c.makeRequestToResolvedURL(ctx, method, requestURL, headers, stream, -1, auth, extraScope) -} - -// resolveRequestURL turns a path for c.makeRequest into a full URL. -// Most users should call makeRequest directly, this exists basically to make the URL available for debug logs. -func (c *dockerClient) resolveRequestURL(path string) (*url.URL, error) { - urlString := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path) - res, err := url.Parse(urlString) - if err != nil { - return nil, err - } - return res, nil -} - -// Checks if the auth headers in the response contain an indication of a failed -// authorization because of an "insufficient_scope" error. If that's the case, -// returns the required scope to be used for fetching a new token. -func needsRetryWithUpdatedScope(res *http.Response) (bool, *authScope) { - if res.StatusCode == http.StatusUnauthorized { - for challenge := range iterateAuthHeader(res.Header) { - if challenge.Scheme == "bearer" { - if errmsg, ok := challenge.Parameters["error"]; ok && errmsg == "insufficient_scope" { - if scope, ok := challenge.Parameters["scope"]; ok && scope != "" { - if newScope, err := parseAuthScope(scope); err == nil { - return true, newScope - } else { - logrus.WithFields(logrus.Fields{ - "error": err, - "scope": scope, - "challenge": challenge, - }).Error("Failed to parse the authentication scope from the given challenge") - } - } - } - } - } - } - return false, nil -} - -// parseRetryAfter determines the delay required by the "Retry-After" header in res and returns it, -// silently falling back to fallbackDelay if the header is missing or invalid. -func parseRetryAfter(res *http.Response, fallbackDelay time.Duration) time.Duration { - after := res.Header.Get("Retry-After") - if after == "" { - return fallbackDelay - } - logrus.Debugf("Detected 'Retry-After' header %q", after) - // First, check if we have a numerical value. - if num, err := strconv.ParseInt(after, 10, 64); err == nil { - return time.Duration(num) * time.Second - } - // Second, check if we have an HTTP date. - if t, err := http.ParseTime(after); err == nil { - // If the delta between the date and now is positive, use it. 
- delta := time.Until(t) - if delta > 0 { - return delta - } - logrus.Debugf("Retry-After date in the past, ignoring it") - return fallbackDelay - } - logrus.Debugf("Invalid Retry-After format, ignoring it") - return fallbackDelay -} - -// makeRequestToResolvedURL creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client. -// streamLen, if not -1, specifies the length of the data expected on stream. -// makeRequest should generally be preferred. -// In case of an HTTP 429 status code in the response, it may automatically retry a few times. -// TODO(runcom): too many arguments here, use a struct -func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method string, requestURL *url.URL, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) { - delay := backoffInitialDelay - attempts := 0 - for { - res, err := c.makeRequestToResolvedURLOnce(ctx, method, requestURL, headers, stream, streamLen, auth, extraScope) - if err != nil { - return nil, err - } - attempts++ - - // By default we use pre-defined scopes per operation. In - // certain cases, this can fail when our authentication is - // insufficient, then we might be getting an error back with a - // Www-Authenticate Header indicating an insufficient scope. - // - // Check for that and update the client challenges to retry after - // requesting a new token - // - // We only try this on the first attempt, to not overload an - // already struggling server. - // We also cannot retry with a body (stream != nil) as stream - // was already read - if attempts == 1 && stream == nil && auth != noAuth { - if retry, newScope := needsRetryWithUpdatedScope(res); retry { - logrus.Debug("Detected insufficient_scope error, will retry request with updated scope") - res.Body.Close() - // Note: This retry ignores extraScope. That’s, strictly speaking, incorrect, but we don’t currently - // expect the insufficient_scope errors to happen for those callers. If that changes, we can add support - // for more than one extra scope. - res, err = c.makeRequestToResolvedURLOnce(ctx, method, requestURL, headers, stream, streamLen, auth, newScope) - if err != nil { - return nil, err - } - extraScope = newScope - } - } - - if res.StatusCode != http.StatusTooManyRequests || // Only retry on StatusTooManyRequests, success or other failure is returned to caller immediately - stream != nil || // We can't retry with a body (which is not restartable in the general case) - attempts == backoffNumIterations { - return res, nil - } - // close response body before retry or context done - res.Body.Close() - - delay = min(parseRetryAfter(res, delay), backoffMaxDelay) - logrus.Debugf("Too many requests to %s: sleeping for %f seconds before next attempt", requestURL.Redacted(), delay.Seconds()) - select { - case <-ctx.Done(): - return nil, ctx.Err() - case <-time.After(delay): - // Nothing - } - delay *= 2 // If the registry does not specify a delay, back off exponentially. - } -} - -// makeRequestToResolvedURLOnce creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client. -// streamLen, if not -1, specifies the length of the data expected on stream. -// makeRequest should generally be preferred. -// Note that no exponential back off is performed when receiving an http 429 status code. 
-func (c *dockerClient) makeRequestToResolvedURLOnce(ctx context.Context, method string, resolvedURL *url.URL, headers map[string][]string, stream io.Reader, streamLen int64, auth sendAuth, extraScope *authScope) (*http.Response, error) { - req, err := http.NewRequestWithContext(ctx, method, resolvedURL.String(), stream) - if err != nil { - return nil, err - } - if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequestWithContext above can figure out the length of bytes.Reader and similar objects without us having to compute it. - req.ContentLength = streamLen - } - req.Header.Set("Docker-Distribution-API-Version", "registry/2.0") - for n, h := range headers { - for _, hh := range h { - req.Header.Add(n, hh) - } - } - req.Header.Add("User-Agent", c.userAgent) - if auth == v2Auth { - if err := c.setupRequestAuth(req, extraScope); err != nil { - return nil, err - } - } - logrus.Debugf("%s %s", method, resolvedURL.Redacted()) - res, err := c.client.Do(req) - if err != nil { - return nil, err - } - if warnings := res.Header.Values("Warning"); len(warnings) != 0 { - c.logResponseWarnings(res, warnings) - } - return res, nil -} - -// logResponseWarnings logs warningHeaders from res, if any. -func (c *dockerClient) logResponseWarnings(res *http.Response, warningHeaders []string) { - c.reportedWarningsLock.Lock() - defer c.reportedWarningsLock.Unlock() - - for _, header := range warningHeaders { - warningString := parseRegistryWarningHeader(header) - if warningString == "" { - logrus.Debugf("Ignored Warning: header from registry: %q", header) - } else { - if !c.reportedWarnings.Contains(warningString) { - c.reportedWarnings.Add(warningString) - // Note that reportedWarnings is based only on warningString, so that we don’t - // repeat the same warning for every request - but the warning includes the URL; - // so it may not be specific to that URL. - logrus.Warnf("Warning from registry (first encountered at %q): %q", res.Request.URL.Redacted(), warningString) - } else { - logrus.Debugf("Repeated warning from registry at %q: %q", res.Request.URL.Redacted(), warningString) - } - } - } -} - -// parseRegistryWarningHeader parses a Warning: header per RFC 7234, limited to the warning -// values allowed by opencontainers/distribution-spec. -// It returns the warning string if the header has the expected format, or "" otherwise. -func parseRegistryWarningHeader(header string) string { - const expectedPrefix = `299 - "` - const expectedSuffix = `"` - - // warning-value = warn-code SP warn-agent SP warn-text [ SP warn-date ] - // distribution-spec requires warn-code=299, warn-agent="-", warn-date missing - header, ok := strings.CutPrefix(header, expectedPrefix) - if !ok { - return "" - } - header, ok = strings.CutSuffix(header, expectedSuffix) - if !ok { - return "" - } - - // ”Recipients that process the value of a quoted-string MUST handle a quoted-pair - // as if it were replaced by the octet following the backslash.”, so let’s do that… - res := strings.Builder{} - afterBackslash := false - for _, c := range []byte(header) { // []byte because escaping is defined in terms of bytes, not Unicode code points - switch { - case c == 0x7F || (c < ' ' && c != '\t'): - return "" // Control characters are forbidden - case afterBackslash: - res.WriteByte(c) - afterBackslash = false - case c == '"': - // This terminates the warn-text and warn-date, forbidden by distribution-spec, follows, - // or completely invalid input. 
- return "" - case c == '\\': - afterBackslash = true - default: - res.WriteByte(c) - } - } - if afterBackslash { - return "" - } - return res.String() -} - -// we're using the challenges from the /v2/ ping response and not the one from the destination -// URL in this request because: -// -// 1) docker does that as well -// 2) gcr.io is sending 401 without a WWW-Authenticate header in the real request -// -// debugging: https://github.com/containers/image/pull/211#issuecomment-273426236 and follows up -func (c *dockerClient) setupRequestAuth(req *http.Request, extraScope *authScope) error { - if len(c.challenges) == 0 { - return nil - } - schemeNames := make([]string, 0, len(c.challenges)) - for _, challenge := range c.challenges { - schemeNames = append(schemeNames, challenge.Scheme) - switch challenge.Scheme { - case "basic": - req.SetBasicAuth(c.auth.Username, c.auth.Password) - return nil - case "bearer": - registryToken := c.registryToken - if registryToken == "" { - cacheKey := "" - scopes := []authScope{c.scope} - if extraScope != nil { - // Using ':' as a separator here is unambiguous because getBearerToken below - // uses the same separator when formatting a remote request (and because - // repository names that we create can't contain colons, and extraScope values - // coming from a server come from `parseAuthScope`, which also splits on colons). - cacheKey = fmt.Sprintf("%s:%s:%s", extraScope.resourceType, extraScope.remoteName, extraScope.actions) - if colonCount := strings.Count(cacheKey, ":"); colonCount != 2 { - return fmt.Errorf( - "Internal error: there must be exactly 2 colons in the cacheKey ('%s') but got %d", - cacheKey, - colonCount, - ) - } - scopes = append(scopes, *extraScope) - } - var token bearerToken - t, inCache := c.tokenCache.Load(cacheKey) - if inCache { - token = t.(bearerToken) - } - if !inCache || time.Now().After(token.expirationTime) { - var ( - t *bearerToken - err error - ) - if c.auth.IdentityToken != "" { - t, err = c.getBearerTokenOAuth2(req.Context(), challenge, scopes) - } else { - t, err = c.getBearerToken(req.Context(), challenge, scopes) - } - if err != nil { - return err - } - - token = *t - c.tokenCache.Store(cacheKey, token) - } - registryToken = token.token - } - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", registryToken)) - return nil - default: - logrus.Debugf("no handler for %s authentication", challenge.Scheme) - } - } - logrus.Infof("None of the challenges sent by server (%s) are supported, trying an unauthenticated request anyway", strings.Join(schemeNames, ", ")) - return nil -} - -func (c *dockerClient) getBearerTokenOAuth2(ctx context.Context, challenge challenge, - scopes []authScope) (*bearerToken, error) { - realm, ok := challenge.Parameters["realm"] - if !ok { - return nil, errors.New("missing realm in bearer auth challenge") - } - - authReq, err := http.NewRequestWithContext(ctx, http.MethodPost, realm, nil) - if err != nil { - return nil, err - } - - // Make the form data required against the oauth2 authentication - // More details here: https://docs.docker.com/registry/spec/auth/oauth/ - params := authReq.URL.Query() - if service, ok := challenge.Parameters["service"]; ok && service != "" { - params.Add("service", service) - } - - for _, scope := range scopes { - if scope.resourceType != "" && scope.remoteName != "" && scope.actions != "" { - params.Add("scope", fmt.Sprintf("%s:%s:%s", scope.resourceType, scope.remoteName, scope.actions)) - } - } - params.Add("grant_type", "refresh_token") - 
params.Add("refresh_token", c.auth.IdentityToken) - params.Add("client_id", "containers/image") - - authReq.Body = io.NopCloser(strings.NewReader(params.Encode())) - authReq.Header.Add("User-Agent", c.userAgent) - authReq.Header.Add("Content-Type", "application/x-www-form-urlencoded") - logrus.Debugf("%s %s", authReq.Method, authReq.URL.Redacted()) - res, err := c.client.Do(authReq) - if err != nil { - return nil, err - } - defer res.Body.Close() - if err := httpResponseToError(res, "Trying to obtain access token"); err != nil { - return nil, err - } - - return newBearerTokenFromHTTPResponseBody(res) -} - -func (c *dockerClient) getBearerToken(ctx context.Context, challenge challenge, - scopes []authScope) (*bearerToken, error) { - realm, ok := challenge.Parameters["realm"] - if !ok { - return nil, errors.New("missing realm in bearer auth challenge") - } - - authReq, err := http.NewRequestWithContext(ctx, http.MethodGet, realm, nil) - if err != nil { - return nil, err - } - - params := authReq.URL.Query() - if c.auth.Username != "" { - params.Add("account", c.auth.Username) - } - - if service, ok := challenge.Parameters["service"]; ok && service != "" { - params.Add("service", service) - } - - for _, scope := range scopes { - if scope.resourceType != "" && scope.remoteName != "" && scope.actions != "" { - params.Add("scope", fmt.Sprintf("%s:%s:%s", scope.resourceType, scope.remoteName, scope.actions)) - } - } - - authReq.URL.RawQuery = params.Encode() - - if c.auth.Username != "" && c.auth.Password != "" { - authReq.SetBasicAuth(c.auth.Username, c.auth.Password) - } - authReq.Header.Add("User-Agent", c.userAgent) - - logrus.Debugf("%s %s", authReq.Method, authReq.URL.Redacted()) - res, err := c.client.Do(authReq) - if err != nil { - return nil, err - } - defer res.Body.Close() - if err := httpResponseToError(res, "Requesting bearer token"); err != nil { - return nil, err - } - - return newBearerTokenFromHTTPResponseBody(res) -} - -// newBearerTokenFromHTTPResponseBody parses a http.Response to obtain a bearerToken. -// The caller is still responsible for ensuring res.Body is closed. -func newBearerTokenFromHTTPResponseBody(res *http.Response) (*bearerToken, error) { - blob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxAuthTokenBodySize) - if err != nil { - return nil, err - } - - var token struct { - Token string `json:"token"` - AccessToken string `json:"access_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` - expirationTime time.Time - } - if err := json.Unmarshal(blob, &token); err != nil { - const bodySampleLength = 50 - bodySample := blob - if len(bodySample) > bodySampleLength { - bodySample = bodySample[:bodySampleLength] - } - return nil, fmt.Errorf("decoding bearer token (last URL %q, body start %q): %w", res.Request.URL.Redacted(), string(bodySample), err) - } - - bt := &bearerToken{ - token: token.Token, - } - if bt.token == "" { - bt.token = token.AccessToken - } - - if token.ExpiresIn < minimumTokenLifetimeSeconds { - token.ExpiresIn = minimumTokenLifetimeSeconds - logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn) - } - if token.IssuedAt.IsZero() { - token.IssuedAt = time.Now().UTC() - } - bt.expirationTime = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second) - return bt, nil -} - -// detectPropertiesHelper performs the work of detectProperties which executes -// it at most once. 
-func (c *dockerClient) detectPropertiesHelper(ctx context.Context) error { - // We overwrite the TLS clients `InsecureSkipVerify` only if explicitly - // specified by the system context - if c.sys != nil && c.sys.DockerInsecureSkipTLSVerify != types.OptionalBoolUndefined { - c.tlsClientConfig.InsecureSkipVerify = c.sys.DockerInsecureSkipTLSVerify == types.OptionalBoolTrue - } - tr := tlsclientconfig.NewTransport() - tr.TLSClientConfig = c.tlsClientConfig - // if set DockerProxyURL explicitly, use the DockerProxyURL instead of system proxy - if c.sys != nil && c.sys.DockerProxyURL != nil { - tr.Proxy = http.ProxyURL(c.sys.DockerProxyURL) - } - c.client = &http.Client{Transport: tr} - - ping := func(scheme string) error { - pingURL, err := url.Parse(fmt.Sprintf(resolvedPingV2URL, scheme, c.registry)) - if err != nil { - return err - } - resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, pingURL, nil, nil, -1, noAuth, nil) - if err != nil { - logrus.Debugf("Ping %s err %s (%#v)", pingURL.Redacted(), err.Error(), err) - return err - } - defer resp.Body.Close() - logrus.Debugf("Ping %s status %d", pingURL.Redacted(), resp.StatusCode) - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized { - return registryHTTPResponseToError(resp) - } - c.challenges = slices.Collect(iterateAuthHeader(resp.Header)) - c.scheme = scheme - c.supportsSignatures = resp.Header.Get("X-Registry-Supports-Signatures") == "1" - return nil - } - err := ping("https") - if err != nil && c.tlsClientConfig.InsecureSkipVerify { - err = ping("http") - } - if err != nil { - err = fmt.Errorf("pinging container registry %s: %w", c.registry, err) - } - return err -} - -// detectProperties detects various properties of the registry. -// See the dockerClient documentation for members which are affected by this. -func (c *dockerClient) detectProperties(ctx context.Context) error { - c.detectPropertiesOnce.Do(func() { c.detectPropertiesError = c.detectPropertiesHelper(ctx) }) - return c.detectPropertiesError -} - -// fetchManifest fetches a manifest for (the repo of ref) + tagOrDigest. -// The caller is responsible for ensuring tagOrDigest uses the expected format. -func (c *dockerClient) fetchManifest(ctx context.Context, ref dockerReference, tagOrDigest string) ([]byte, string, error) { - path := fmt.Sprintf(manifestPath, reference.Path(ref.ref), tagOrDigest) - headers := map[string][]string{ - "Accept": manifest.DefaultRequestedManifestMIMETypes, - } - res, err := c.makeRequest(ctx, http.MethodGet, path, headers, nil, v2Auth, nil) - if err != nil { - return nil, "", err - } - logrus.Debugf("Content-Type from manifest GET is %q", res.Header.Get("Content-Type")) - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - return nil, "", fmt.Errorf("reading manifest %s in %s: %w", tagOrDigest, ref.ref.Name(), registryHTTPResponseToError(res)) - } - - manblob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxManifestBodySize) - if err != nil { - return nil, "", err - } - return manblob, simplifyContentType(res.Header.Get("Content-Type")), nil -} - -// getExternalBlob returns the reader of the first available blob URL from urls, which must not be empty. -// This function can return nil reader when no url is supported by this function. In this case, the caller -// should fallback to fetch the non-external blob (i.e. pull from the registry). 
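`detectPropertiesHelper` probes `/v2/` over HTTPS first and retries over plain HTTP only when TLS verification is already disabled; per the distribution spec, both 200 and 401 from the ping count as a live registry. A reduced sketch of that probe (placeholder registry name, simplified client setup):

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// pingV2 probes the registry API root; both 200 and 401 indicate a live
// /v2/ endpoint (401 just means authentication is required).
func pingV2(client *http.Client, scheme, registry string) error {
	resp, err := client.Get(fmt.Sprintf("%s://%s/v2/", scheme, registry))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
		return fmt.Errorf("unexpected status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	client := &http.Client{Timeout: 10 * time.Second}
	insecure := true // would come from configuration in real code
	err := pingV2(client, "https", "registry.example.com")
	if err != nil && insecure {
		// Only fall back to plain HTTP when TLS verification is
		// already disabled for this registry.
		err = pingV2(client, "http", "registry.example.com")
	}
	fmt.Println("ping result:", err)
}
```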
-func (c *dockerClient) getExternalBlob(ctx context.Context, urls []string) (io.ReadCloser, int64, error) { - if len(urls) == 0 { - return nil, 0, errors.New("internal error: getExternalBlob called with no URLs") - } - var remoteErrors []error - for _, u := range urls { - blobURL, err := url.Parse(u) - if err != nil || (blobURL.Scheme != "http" && blobURL.Scheme != "https") { - continue // unsupported url. skip this url. - } - // NOTE: we must not authenticate on additional URLs as those - // can be abused to leak credentials or tokens. Please - // refer to CVE-2020-15157 for more information. - resp, err := c.makeRequestToResolvedURL(ctx, http.MethodGet, blobURL, nil, nil, -1, noAuth, nil) - if err != nil { - remoteErrors = append(remoteErrors, err) - continue - } - if resp.StatusCode != http.StatusOK { - err := fmt.Errorf("error fetching external blob from %q: %w", u, newUnexpectedHTTPStatusError(resp)) - remoteErrors = append(remoteErrors, err) - logrus.Debug(err) - resp.Body.Close() - continue - } - - size, err := getBlobSize(resp) - if err != nil { - size = -1 - } - return resp.Body, size, nil - } - if remoteErrors == nil { - return nil, 0, nil // fallback to non-external blob - } - return nil, 0, fmt.Errorf("failed fetching external blob from all urls: %w", multierr.Format("", ", ", "", remoteErrors)) -} - -func getBlobSize(resp *http.Response) (int64, error) { - hdrs := resp.Header.Values("Content-Length") - if len(hdrs) == 0 { - return -1, errors.New(`Missing "Content-Length" header in response`) - } - hdr := hdrs[0] // Equivalent to resp.Header.Get(…) - size, err := strconv.ParseInt(hdr, 10, 64) - if err != nil { // Go’s response reader should already reject such values. - return -1, err - } - if size < 0 { // '-' is not a valid character in Content-Length, so negative values are invalid. Go’s response reader should already reject such values. - return -1, fmt.Errorf(`Invalid negative "Content-Length" %q`, hdr) - } - return size, nil -} - -// getBlob returns a stream for the specified blob in ref, and the blob’s size (or -1 if unknown). -// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. -// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. 
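`getBlobSize` treats a missing or unparsable `Content-Length` as "size unknown" (-1) rather than a fatal error, since callers can stream without knowing the size. Roughly, under the same convention:

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
	"strconv"
)

// contentLength returns the declared size of a response body, or -1 with
// an error when the header is missing or malformed, mirroring the
// "size unknown" convention used above.
func contentLength(resp *http.Response) (int64, error) {
	hdr := resp.Header.Get("Content-Length")
	if hdr == "" {
		return -1, errors.New(`missing "Content-Length" header`)
	}
	size, err := strconv.ParseInt(hdr, 10, 64)
	if err != nil {
		return -1, err
	}
	if size < 0 {
		return -1, fmt.Errorf(`negative "Content-Length" %q`, hdr)
	}
	return size, nil
}

func main() {
	resp := &http.Response{Header: http.Header{"Content-Length": []string{"1024"}}}
	fmt.Println(contentLength(resp)) // 1024 <nil>
}
```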
-func (c *dockerClient) getBlob(ctx context.Context, ref dockerReference, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { - if len(info.URLs) != 0 { - r, s, err := c.getExternalBlob(ctx, info.URLs) - if err != nil { - return nil, 0, err - } else if r != nil { - return r, s, nil - } - } - - if err := info.Digest.Validate(); err != nil { // Make sure info.Digest.String() does not contain any unexpected characters - return nil, 0, err - } - path := fmt.Sprintf(blobsPath, reference.Path(ref.ref), info.Digest.String()) - logrus.Debugf("Downloading %s", path) - res, err := c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil) - if err != nil { - return nil, 0, err - } - if res.StatusCode != http.StatusOK { - err := registryHTTPResponseToError(res) - res.Body.Close() - return nil, 0, fmt.Errorf("fetching blob: %w", err) - } - cache.RecordKnownLocation(ref.Transport(), bicTransportScope(ref), info.Digest, newBICLocationReference(ref)) - blobSize, err := getBlobSize(res) - if err != nil { - blobSize = -1 - } - - reconnectingReader, err := newBodyReader(ctx, c, path, res.Body) - if err != nil { - res.Body.Close() - return nil, 0, err - } - return reconnectingReader, blobSize, nil -} - -// getOCIDescriptorContents returns the contents a blob specified by descriptor in ref, which must fit within limit. -func (c *dockerClient) getOCIDescriptorContents(ctx context.Context, ref dockerReference, desc imgspecv1.Descriptor, maxSize int, cache types.BlobInfoCache) ([]byte, error) { - // Note that this copies all kinds of attachments: attestations, and whatever else is there, - // not just signatures. We leave the signature consumers to decide based on the MIME type. - - if err := desc.Digest.Validate(); err != nil { // .Algorithm() might panic without this check - return nil, fmt.Errorf("invalid digest %q: %w", desc.Digest.String(), err) - } - digestAlgorithm := desc.Digest.Algorithm() - if !digestAlgorithm.Available() { - return nil, fmt.Errorf("invalid digest %q: unsupported digest algorithm %q", desc.Digest.String(), digestAlgorithm.String()) - } - - reader, _, err := c.getBlob(ctx, ref, manifest.BlobInfoFromOCI1Descriptor(desc), cache) - if err != nil { - return nil, err - } - defer reader.Close() - payload, err := iolimits.ReadAtMost(reader, maxSize) - if err != nil { - return nil, fmt.Errorf("reading blob %s in %s: %w", desc.Digest.String(), ref.ref.Name(), err) - } - actualDigest := digestAlgorithm.FromBytes(payload) - if actualDigest != desc.Digest { - return nil, fmt.Errorf("digest mismatch, expected %q, got %q", desc.Digest.String(), actualDigest.String()) - } - return payload, nil -} - -// isManifestUnknownError returns true iff err from fetchManifest is a “manifest unknown” error. -func isManifestUnknownError(err error) bool { - // docker/distribution, and as defined in the spec - var ec errcode.ErrorCoder - if errors.As(err, &ec) && ec.ErrorCode() == v2.ErrorCodeManifestUnknown { - return true - } - // registry.redhat.io as of October 2022 - var e errcode.Error - if errors.As(err, &e) && e.ErrorCode() == errcode.ErrorCodeUnknown && e.Message == "Not Found" { - return true - } - // Harbor v2.10.2 - if errors.As(err, &e) && e.ErrorCode() == errcode.ErrorCodeUnknown && strings.Contains(strings.ToLower(e.Message), "not found") { - return true - } - - // opencontainers/distribution-spec does not require the errcode.Error payloads to be used, - // but specifies that the HTTP status must be 404. 
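The digest check at the end of `getOCIDescriptorContents` is the trust boundary: the payload is re-hashed with the descriptor's algorithm and compared before anything uses it. A simplified sha256-only sketch of that verification:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// verify re-hashes a fetched payload and compares it with the expected
// digest, the same check getOCIDescriptorContents performs (the real code
// uses the descriptor's own digest algorithm, not always sha256).
func verify(expected string, payload []byte) error {
	actual := fmt.Sprintf("sha256:%x", sha256.Sum256(payload))
	if actual != expected {
		return fmt.Errorf("digest mismatch, expected %q, got %q", expected, actual)
	}
	return nil
}

func main() {
	payload := []byte("layer data")
	expected := fmt.Sprintf("sha256:%x", sha256.Sum256(payload))
	fmt.Println(verify(expected, payload)) // <nil>
}
```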
- var unexpected *unexpectedHTTPResponseError - if errors.As(err, &unexpected) && unexpected.StatusCode == http.StatusNotFound { - return true - } - return false -} - -// getSigstoreAttachmentManifest loads and parses the manifest for sigstore attachments for -// digest in ref. -// It returns (nil, nil) if the manifest does not exist. -func (c *dockerClient) getSigstoreAttachmentManifest(ctx context.Context, ref dockerReference, digest digest.Digest) (*manifest.OCI1, error) { - tag, err := sigstoreAttachmentTag(digest) - if err != nil { - return nil, err - } - sigstoreRef, err := reference.WithTag(reference.TrimNamed(ref.ref), tag) - if err != nil { - return nil, err - } - logrus.Debugf("Looking for sigstore attachments in %s", sigstoreRef.String()) - manifestBlob, mimeType, err := c.fetchManifest(ctx, ref, tag) - if err != nil { - // FIXME: Are we going to need better heuristics?? - // This alone is probably a good enough reason for sigstore to be opt-in only, - // otherwise we would just break ordinary copies. - if isManifestUnknownError(err) { - logrus.Debugf("Fetching sigstore attachment manifest failed, assuming it does not exist: %v", err) - return nil, nil - } - logrus.Debugf("Fetching sigstore attachment manifest failed: %v", err) - return nil, err - } - if mimeType != imgspecv1.MediaTypeImageManifest { - // FIXME: Try anyway?? - return nil, fmt.Errorf("unexpected MIME type for sigstore attachment manifest %s: %q", - sigstoreRef.String(), mimeType) - } - res, err := manifest.OCI1FromManifest(manifestBlob) - if err != nil { - return nil, fmt.Errorf("parsing manifest %s: %w", sigstoreRef.String(), err) - } - return res, nil -} - -// getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension, -// using the original data structures. -func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) { - if err := manifestDigest.Validate(); err != nil { // Make sure manifestDigest.String() does not contain any unexpected characters - return nil, err - } - path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest) - res, err := c.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil) - if err != nil { - return nil, err - } - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - return nil, fmt.Errorf("downloading signatures for %s in %s: %w", manifestDigest, ref.ref.Name(), registryHTTPResponseToError(res)) - } - - body, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureListBodySize) - if err != nil { - return nil, err - } - - var parsedBody extensionSignatureList - if err := json.Unmarshal(body, &parsedBody); err != nil { - return nil, fmt.Errorf("decoding signature list: %w", err) - } - return &parsedBody, nil -} - -// sigstoreAttachmentTag returns a sigstore attachment tag for the specified digest. -func sigstoreAttachmentTag(d digest.Digest) (string, error) { - if err := d.Validate(); err != nil { // Make sure d.String() doesn’t contain any unexpected characters - return "", err - } - return strings.Replace(d.String(), ":", "-", 1) + ".sig", nil -} - -// Close removes resources associated with an initialized dockerClient, if any. 
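`sigstoreAttachmentTag` maps a manifest digest to the cosign-style tag under which attachments live in the same repository: the `:` becomes `-` and `.sig` is appended. For example (validation omitted; the vendored code validates the digest first):

```go
package main

import (
	"fmt"
	"strings"
)

// attachmentTag converts "sha256:<hex>" into the cosign-style tag
// "sha256-<hex>.sig" used to address attachments in the same repository.
func attachmentTag(dgst string) string {
	return strings.Replace(dgst, ":", "-", 1) + ".sig"
}

func main() {
	d := "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
	fmt.Println(attachmentTag(d))
	// sha256-e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855.sig
}
```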
-func (c *dockerClient) Close() error { - if c.client != nil { - c.client.CloseIdleConnections() - } - return nil -} diff --git a/vendor/github.com/containers/image/v5/docker/docker_image.go b/vendor/github.com/containers/image/v5/docker/docker_image.go deleted file mode 100644 index 74f559dce7..0000000000 --- a/vendor/github.com/containers/image/v5/docker/docker_image.go +++ /dev/null @@ -1,186 +0,0 @@ -package docker - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/internal/image" - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/types" - "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" -) - -// Image is a Docker-specific implementation of types.ImageCloser with a few extra methods -// which are specific to Docker. -type Image struct { - types.ImageCloser - src *dockerImageSource -} - -// newImage returns a new Image interface type after setting up -// a client to the registry hosting the given image. -// The caller must call .Close() on the returned Image. -func newImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) (types.ImageCloser, error) { - s, err := newImageSource(ctx, sys, ref) - if err != nil { - return nil, err - } - img, err := image.FromSource(ctx, sys, s) - if err != nil { - return nil, err - } - return &Image{ImageCloser: img, src: s}, nil -} - -// SourceRefFullName returns a fully expanded name for the repository this image is in. -func (i *Image) SourceRefFullName() string { - return i.src.logicalRef.ref.Name() -} - -// GetRepositoryTags list all tags available in the repository. The tag -// provided inside the ImageReference will be ignored. (This is a -// backward-compatible shim method which calls the module-level -// GetRepositoryTags) -func (i *Image) GetRepositoryTags(ctx context.Context) ([]string, error) { - return GetRepositoryTags(ctx, i.src.c.sys, i.src.logicalRef) -} - -// GetRepositoryTags list all tags available in the repository. The tag -// provided inside the ImageReference will be ignored. -func GetRepositoryTags(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) ([]string, error) { - dr, ok := ref.(dockerReference) - if !ok { - return nil, errors.New("ref must be a dockerReference") - } - - registryConfig, err := loadRegistryConfiguration(sys) - if err != nil { - return nil, err - } - path := fmt.Sprintf(tagsPath, reference.Path(dr.ref)) - client, err := newDockerClientFromRef(sys, dr, registryConfig, false, "pull") - if err != nil { - return nil, fmt.Errorf("failed to create client: %w", err) - } - defer client.Close() - - tags := make([]string, 0) - - for { - res, err := client.makeRequest(ctx, http.MethodGet, path, nil, nil, v2Auth, nil) - if err != nil { - return nil, err - } - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - return nil, fmt.Errorf("fetching tags list: %w", registryHTTPResponseToError(res)) - } - - var tagsHolder struct { - Tags []string - } - if err = json.NewDecoder(res.Body).Decode(&tagsHolder); err != nil { - return nil, err - } - for _, tag := range tagsHolder.Tags { - if _, err := reference.WithTag(dr.ref, tag); err != nil { // Ensure the tag does not contain unexpected values - // Per https://github.com/containers/skopeo/issues/2409 , Sonatype Nexus 3.58, contrary - // to the spec, may include JSON null values in the list; and Go silently parses them as "". 
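The validation in this loop defends against registries that return junk in the tags list: JSON `null` decoded as an empty string, and digest-shaped entries. A standalone approximation with hypothetical regexps (the real code reuses `reference.WithTag` and `digest.Parse` instead):

```go
package main

import (
	"fmt"
	"regexp"
)

// Hypothetical patterns approximating the checks in the loop above: a tag
// must match the distribution-spec grammar; empty strings and digest-shaped
// entries from quirky registries are silently skipped.
var (
	tagRe    = regexp.MustCompile(`^[a-zA-Z0-9_][a-zA-Z0-9._-]{0,127}$`)
	digestRe = regexp.MustCompile(`^[a-z0-9]+:[0-9a-f]{32,}$`)
)

func filterTags(raw []string) (tags []string, err error) {
	for _, t := range raw {
		switch {
		case t == "": // JSON null decoded as ""
			continue
		case digestRe.MatchString(t): // digest leaked into the tag list
			continue
		case !tagRe.MatchString(t):
			return nil, fmt.Errorf("registry returned invalid tag %q", t)
		default:
			tags = append(tags, t)
		}
	}
	return tags, nil
}

func main() {
	fmt.Println(filterTags([]string{"latest", "", "sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef", "v1.2"}))
}
```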
- if tag == "" { - logrus.Debugf("Ignoring invalid empty tag") - continue - } - // Per https://github.com/containers/skopeo/issues/2346 , unknown versions of JFrog Artifactory, - // contrary to the tag format specified in - // https://github.com/opencontainers/distribution-spec/blob/8a871c8234977df058f1a14e299fe0a673853da2/spec.md?plain=1#L160 , - // include digests in the list. - if _, err := digest.Parse(tag); err == nil { - logrus.Debugf("Ignoring invalid tag %q matching a digest format", tag) - continue - } - return nil, fmt.Errorf("registry returned invalid tag %q: %w", tag, err) - } - tags = append(tags, tag) - } - - link := res.Header.Get("Link") - if link == "" { - break - } - - linkURLPart, _, _ := strings.Cut(link, ";") - linkURL, err := url.Parse(strings.Trim(linkURLPart, "<>")) - if err != nil { - return tags, err - } - - // can be relative or absolute, but we only want the path (and I - // guess we're in trouble if it forwards to a new place...) - path = linkURL.Path - if linkURL.RawQuery != "" { - path += "?" - path += linkURL.RawQuery - } - } - return tags, nil -} - -// GetDigest returns the image's digest -// Use this to optimize and avoid use of an ImageSource based on the returned digest; -// if you are going to use an ImageSource anyway, it’s more efficient to create it first -// and compute the digest from the value returned by GetManifest. -// NOTE: Implemented to avoid Docker Hub API limits, and mirror configuration may be -// ignored (but may be implemented in the future) -func GetDigest(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) (digest.Digest, error) { - dr, ok := ref.(dockerReference) - if !ok { - return "", errors.New("ref must be a dockerReference") - } - if dr.isUnknownDigest { - return "", fmt.Errorf("docker: reference %q is for unknown digest case; cannot get digest", dr.StringWithinTransport()) - } - - tagOrDigest, err := dr.tagOrDigest() - if err != nil { - return "", err - } - - registryConfig, err := loadRegistryConfiguration(sys) - if err != nil { - return "", err - } - client, err := newDockerClientFromRef(sys, dr, registryConfig, false, "pull") - if err != nil { - return "", fmt.Errorf("failed to create client: %w", err) - } - defer client.Close() - - path := fmt.Sprintf(manifestPath, reference.Path(dr.ref), tagOrDigest) - headers := map[string][]string{ - "Accept": manifest.DefaultRequestedManifestMIMETypes, - } - - res, err := client.makeRequest(ctx, http.MethodHead, path, headers, nil, v2Auth, nil) - if err != nil { - return "", err - } - - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - return "", fmt.Errorf("reading digest %s in %s: %w", tagOrDigest, dr.ref.Name(), registryHTTPResponseToError(res)) - } - - dig, err := digest.Parse(res.Header.Get("Docker-Content-Digest")) - if err != nil { - return "", err - } - - return dig, nil -} diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go b/vendor/github.com/containers/image/v5/docker/docker_image_dest.go deleted file mode 100644 index 76e48a3845..0000000000 --- a/vendor/github.com/containers/image/v5/docker/docker_image_dest.go +++ /dev/null @@ -1,937 +0,0 @@ -package docker - -import ( - "bytes" - "context" - "crypto/rand" - "encoding/json" - "errors" - "fmt" - "io" - "maps" - "net/http" - "net/url" - "os" - "path/filepath" - "slices" - "strings" - - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/internal/blobinfocache" - "github.com/containers/image/v5/internal/imagedestination/impl" - 
"github.com/containers/image/v5/internal/imagedestination/stubs" - "github.com/containers/image/v5/internal/iolimits" - "github.com/containers/image/v5/internal/private" - "github.com/containers/image/v5/internal/putblobdigest" - "github.com/containers/image/v5/internal/set" - "github.com/containers/image/v5/internal/signature" - "github.com/containers/image/v5/internal/streamdigest" - "github.com/containers/image/v5/internal/uploadreader" - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/pkg/blobinfocache/none" - compressiontypes "github.com/containers/image/v5/pkg/compression/types" - "github.com/containers/image/v5/types" - "github.com/docker/distribution/registry/api/errcode" - v2 "github.com/docker/distribution/registry/api/v2" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" -) - -type dockerImageDestination struct { - impl.Compat - impl.PropertyMethodsInitialize - stubs.IgnoresOriginalOCIConfig - stubs.NoPutBlobPartialInitialize - - ref dockerReference - c *dockerClient - // State - manifestDigest digest.Digest // or "" if not yet known. -} - -// newImageDestination creates a new ImageDestination for the specified image reference. -func newImageDestination(sys *types.SystemContext, ref dockerReference) (private.ImageDestination, error) { - registryConfig, err := loadRegistryConfiguration(sys) - if err != nil { - return nil, err - } - c, err := newDockerClientFromRef(sys, ref, registryConfig, true, "pull,push") - if err != nil { - return nil, err - } - mimeTypes := []string{ - imgspecv1.MediaTypeImageManifest, - manifest.DockerV2Schema2MediaType, - imgspecv1.MediaTypeImageIndex, - manifest.DockerV2ListMediaType, - } - if c.sys == nil || !c.sys.DockerDisableDestSchema1MIMETypes { - mimeTypes = append(mimeTypes, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType) - } - - dest := &dockerImageDestination{ - PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{ - SupportedManifestMIMETypes: mimeTypes, - DesiredLayerCompression: types.Compress, - MustMatchRuntimeOS: false, - IgnoresEmbeddedDockerReference: false, // We do want the manifest updated; older registry versions refuse manifests if the embedded reference does not match. - HasThreadSafePutBlob: true, - }), - NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref), - - ref: ref, - c: c, - } - dest.Compat = impl.AddCompat(dest) - return dest, nil -} - -// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, -// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects. -func (d *dockerImageDestination) Reference() types.ImageReference { - return d.ref -} - -// Close removes resources associated with an initialized ImageDestination, if any. -func (d *dockerImageDestination) Close() error { - return d.c.Close() -} - -// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures. -// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil. 
-func (d *dockerImageDestination) SupportsSignatures(ctx context.Context) error { - if err := d.c.detectProperties(ctx); err != nil { - return err - } - switch { - case d.c.supportsSignatures: - return nil - case d.c.signatureBase != nil: - return nil - default: - return errors.New("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration") - } -} - -// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually -// uploaded to the image destination, true otherwise. -func (d *dockerImageDestination) AcceptsForeignLayerURLs() bool { - return true -} - -// sizeCounter is an io.Writer which only counts the total size of its input. -type sizeCounter struct{ size int64 } - -func (c *sizeCounter) Write(p []byte) (n int, err error) { - c.size += int64(len(p)) - return len(p), nil -} - -// PutBlobWithOptions writes contents of stream and returns data representing the result. -// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. -// inputInfo.Size is the expected length of stream, if known. -// inputInfo.MediaType describes the blob format, if known. -// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available -// to any other readers for download using the supplied digest. -// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far. -func (d *dockerImageDestination) PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options private.PutBlobOptions) (private.UploadedBlob, error) { - // If requested, precompute the blob digest to prevent uploading layers that already exist on the registry. - // This functionality is particularly useful when BlobInfoCache has not been populated with compressed digests, - // the source blob is uncompressed, and the destination blob is being compressed "on the fly". - if inputInfo.Digest == "" && d.c.sys != nil && d.c.sys.DockerRegistryPushPrecomputeDigests { - logrus.Debugf("Precomputing digest layer for %s", reference.Path(d.ref.ref)) - streamCopy, cleanup, err := streamdigest.ComputeBlobInfo(d.c.sys, stream, &inputInfo) - if err != nil { - return private.UploadedBlob{}, err - } - defer cleanup() - stream = streamCopy - } - - if inputInfo.Digest != "" { - // This should not really be necessary, at least the copy code calls TryReusingBlob automatically. - // Still, we need to check, if only because the "initiate upload" endpoint does not have a documented "blob already exists" return value. - haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, inputInfo, options.Cache) - if err != nil { - return private.UploadedBlob{}, err - } - if haveBlob { - return private.UploadedBlob{Digest: reusedInfo.Digest, Size: reusedInfo.Size}, nil - } - } - - // FIXME? Chunked upload, progress reporting, etc. 
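The `sizeCounter` defined above is the whole size-tracking mechanism: the upload stream is teed through it, so by the time the upload finishes the total byte count is known without buffering. In miniature:

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// sizeCounter counts bytes without storing them, like the helper above;
// teeing an upload stream through it yields the total size for free.
type sizeCounter struct{ n int64 }

func (c *sizeCounter) Write(p []byte) (int, error) {
	c.n += int64(len(p))
	return len(p), nil
}

func main() {
	var sc sizeCounter
	body := io.TeeReader(strings.NewReader("some layer bytes"), &sc)
	uploaded, _ := io.ReadAll(body) // stands in for the real upload
	fmt.Println(len(uploaded), sc.n) // 16 16
}
```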
- uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref)) - logrus.Debugf("Uploading %s", uploadPath) - res, err := d.c.makeRequest(ctx, http.MethodPost, uploadPath, nil, nil, v2Auth, nil) - if err != nil { - return private.UploadedBlob{}, err - } - defer res.Body.Close() - if res.StatusCode != http.StatusAccepted { - logrus.Debugf("Error initiating layer upload, response %#v", *res) - return private.UploadedBlob{}, fmt.Errorf("initiating layer upload to %s in %s: %w", uploadPath, d.c.registry, registryHTTPResponseToError(res)) - } - uploadLocation, err := res.Location() - if err != nil { - return private.UploadedBlob{}, fmt.Errorf("determining upload URL: %w", err) - } - - digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, inputInfo) - sizeCounter := &sizeCounter{} - stream = io.TeeReader(stream, sizeCounter) - - uploadLocation, err = func() (*url.URL, error) { // A scope for defer - uploadReader := uploadreader.NewUploadReader(stream) - // This error text should never be user-visible, we terminate only after makeRequestToResolvedURL - // returns, so there isn’t a way for the error text to be provided to any of our callers. - defer uploadReader.Terminate(errors.New("Reading data from an already terminated upload")) - res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPatch, uploadLocation, map[string][]string{"Content-Type": {"application/octet-stream"}}, uploadReader, inputInfo.Size, v2Auth, nil) - if err != nil { - logrus.Debugf("Error uploading layer chunked %v", err) - return nil, err - } - defer res.Body.Close() - if !successStatus(res.StatusCode) { - return nil, fmt.Errorf("uploading layer chunked: %w", registryHTTPResponseToError(res)) - } - uploadLocation, err := res.Location() - if err != nil { - return nil, fmt.Errorf("determining upload URL: %w", err) - } - return uploadLocation, nil - }() - if err != nil { - return private.UploadedBlob{}, err - } - blobDigest := digester.Digest() - - // FIXME: DELETE uploadLocation on failure (does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope) - - locationQuery := uploadLocation.Query() - locationQuery.Set("digest", blobDigest.String()) - uploadLocation.RawQuery = locationQuery.Encode() - res, err = d.c.makeRequestToResolvedURL(ctx, http.MethodPut, uploadLocation, map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, v2Auth, nil) - if err != nil { - return private.UploadedBlob{}, err - } - defer res.Body.Close() - if res.StatusCode != http.StatusCreated { - logrus.Debugf("Error uploading layer, response %#v", *res) - return private.UploadedBlob{}, fmt.Errorf("uploading layer to %s: %w", uploadLocation, registryHTTPResponseToError(res)) - } - - logrus.Debugf("Upload of layer %s complete", blobDigest) - options.Cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), blobDigest, newBICLocationReference(d.ref)) - return private.UploadedBlob{Digest: blobDigest, Size: sizeCounter.size}, nil -} - -// blobExists returns true iff repo contains a blob with digest, and if so, also its size. -// If the destination does not contain the blob, or it is unknown, blobExists ordinarily returns (false, -1, nil); -// it returns a non-nil error only on an unexpected failure. 
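The upload in `PutBlobWithOptions` follows the registry push protocol: `POST` to open an upload session, `PATCH` the payload to the returned location, then `PUT` with `?digest=` to commit. A bare-bones sketch of the same three steps (placeholder endpoint and digest; no auth, chunking, or error-body handling):

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

// uploadBlob sketches the push protocol used above: POST to start a
// session, PATCH the bytes, then PUT with ?digest= to commit.
func uploadBlob(c *http.Client, registry, repo, dgst string, data []byte) error {
	res, err := c.Post(fmt.Sprintf("%s/v2/%s/blobs/uploads/", registry, repo), "", nil)
	if err != nil {
		return err
	}
	res.Body.Close()
	if res.StatusCode != http.StatusAccepted {
		return fmt.Errorf("initiating upload: status %d", res.StatusCode)
	}
	loc, err := res.Location() // session URL for the following requests
	if err != nil {
		return err
	}

	req, err := http.NewRequest(http.MethodPatch, loc.String(), bytes.NewReader(data))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/octet-stream")
	res, err = c.Do(req)
	if err != nil {
		return err
	}
	res.Body.Close()
	loc, err = res.Location() // the session URL may move between requests
	if err != nil {
		return err
	}

	q := loc.Query()
	q.Set("digest", dgst) // committing requires the content digest
	loc.RawQuery = q.Encode()
	req, err = http.NewRequest(http.MethodPut, loc.String(), nil)
	if err != nil {
		return err
	}
	res, err = c.Do(req)
	if err != nil {
		return err
	}
	res.Body.Close()
	if res.StatusCode != http.StatusCreated {
		return fmt.Errorf("committing upload: status %d", res.StatusCode)
	}
	return nil
}

func main() {
	fmt.Println(uploadBlob(http.DefaultClient, "http://localhost:5000", "demo", "sha256:deadbeef", []byte("hi")))
}
```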
-func (d *dockerImageDestination) blobExists(ctx context.Context, repo reference.Named, digest digest.Digest, extraScope *authScope) (bool, int64, error) { - if err := digest.Validate(); err != nil { // Make sure digest.String() does not contain any unexpected characters - return false, -1, err - } - checkPath := fmt.Sprintf(blobsPath, reference.Path(repo), digest.String()) - logrus.Debugf("Checking %s", checkPath) - res, err := d.c.makeRequest(ctx, http.MethodHead, checkPath, nil, nil, v2Auth, extraScope) - if err != nil { - return false, -1, err - } - defer res.Body.Close() - switch res.StatusCode { - case http.StatusOK: - size, err := getBlobSize(res) - if err != nil { - return false, -1, fmt.Errorf("determining size of blob %s in %s: %w", digest, repo.Name(), err) - } - logrus.Debugf("... already exists") - return true, size, nil - case http.StatusUnauthorized: - logrus.Debugf("... not authorized") - return false, -1, fmt.Errorf("checking whether a blob %s exists in %s: %w", digest, repo.Name(), registryHTTPResponseToError(res)) - case http.StatusNotFound: - logrus.Debugf("... not present") - return false, -1, nil - default: - return false, -1, fmt.Errorf("checking whether a blob %s exists in %s: %w", digest, repo.Name(), registryHTTPResponseToError(res)) - } -} - -// mountBlob tries to mount blob srcDigest from srcRepo to the current destination. -func (d *dockerImageDestination) mountBlob(ctx context.Context, srcRepo reference.Named, srcDigest digest.Digest, extraScope *authScope) error { - u := url.URL{ - Path: fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref)), - RawQuery: url.Values{ - "mount": {srcDigest.String()}, - "from": {reference.Path(srcRepo)}, - }.Encode(), - } - logrus.Debugf("Trying to mount %s", u.Redacted()) - res, err := d.c.makeRequest(ctx, http.MethodPost, u.String(), nil, nil, v2Auth, extraScope) - if err != nil { - return err - } - defer res.Body.Close() - switch res.StatusCode { - case http.StatusCreated: - logrus.Debugf("... mount OK") - return nil - case http.StatusAccepted: - // Oops, the mount was ignored - either the registry does not support that yet, or the blob does not exist; the registry has started an ordinary upload process. - // Abort, and let the ultimate caller do an upload when its ready, instead. - // NOTE: This does not really work in docker/distribution servers, which incorrectly require the "delete" action in the token's scope, and is thus entirely untested. - uploadLocation, err := res.Location() - if err != nil { - return fmt.Errorf("determining upload URL after a mount attempt: %w", err) - } - logrus.Debugf("... 
started an upload instead of mounting, trying to cancel at %s", uploadLocation.Redacted()) - res2, err := d.c.makeRequestToResolvedURL(ctx, http.MethodDelete, uploadLocation, nil, nil, -1, v2Auth, extraScope) - if err != nil { - logrus.Debugf("Error trying to cancel an inadvertent upload: %s", err) - } else { - defer res2.Body.Close() - if res2.StatusCode != http.StatusNoContent { - logrus.Debugf("Error trying to cancel an inadvertent upload, status %s", http.StatusText(res.StatusCode)) - } - } - // Anyway, if canceling the upload fails, ignore it and return the more important error: - return fmt.Errorf("Mounting %s from %s to %s started an upload instead", srcDigest, srcRepo.Name(), d.ref.ref.Name()) - default: - logrus.Debugf("Error mounting, response %#v", *res) - return fmt.Errorf("mounting %s from %s to %s: %w", srcDigest, srcRepo.Name(), d.ref.ref.Name(), registryHTTPResponseToError(res)) - } -} - -// tryReusingExactBlob is a subset of TryReusingBlob which _only_ looks for exactly the specified -// blob in the current repository, with no cross-repo reuse or mounting; cache may be updated, it is not read. -// The caller must ensure info.Digest is set. -func (d *dockerImageDestination) tryReusingExactBlob(ctx context.Context, info types.BlobInfo, cache blobinfocache.BlobInfoCache2) (bool, private.ReusedBlob, error) { - exists, size, err := d.blobExists(ctx, d.ref.ref, info.Digest, nil) - if err != nil { - return false, private.ReusedBlob{}, err - } - if exists { - cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, newBICLocationReference(d.ref)) - return true, private.ReusedBlob{Digest: info.Digest, Size: size}, nil - } - return false, private.ReusedBlob{}, nil -} - -func optionalCompressionName(algo *compressiontypes.Algorithm) string { - if algo != nil { - return algo.Name() - } - return "nil" -} - -// TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination -// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). -// info.Digest must not be empty. -// If the blob has been successfully reused, returns (true, info, nil). -// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. -func (d *dockerImageDestination) TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options private.TryReusingBlobOptions) (bool, private.ReusedBlob, error) { - if info.Digest == "" { - return false, private.ReusedBlob{}, errors.New("Can not check for a blob with unknown digest") - } - - originalCandidateKnownToBeMissing := false - if impl.OriginalCandidateMatchesTryReusingBlobOptions(options) { - // First, check whether the blob happens to already exist at the destination. - haveBlob, reusedInfo, err := d.tryReusingExactBlob(ctx, info, options.Cache) - if err != nil { - return false, private.ReusedBlob{}, err - } - if haveBlob { - return true, reusedInfo, nil - } - originalCandidateKnownToBeMissing = true - } else { - logrus.Debugf("Ignoring exact blob match, compression %s does not match required %s or MIME types %#v", - optionalCompressionName(options.OriginalCompression), optionalCompressionName(options.RequiredCompression), options.PossibleManifestFormats) - // We can get here with a blob detected to be zstd when the user wants a zstd:chunked. 
- // In that case we keep originalCandidateKnownToBeMissing = false, so that if we find
- // a BIC entry for this blob, we do use that entry and return a zstd:chunked entry
- // with the BIC’s annotations.
- // This is not quite correct, it only works if the BIC also contains an acceptable _location_.
- // Ideally, we could look up just the compression algorithm/annotations for info.digest,
- // and use it even if no location candidate exists and the original candidate is present.
- }
-
- // Then try reusing blobs from other locations.
- candidates := options.Cache.CandidateLocations2(d.ref.Transport(), bicTransportScope(d.ref), info.Digest, blobinfocache.CandidateLocations2Options{
- CanSubstitute: options.CanSubstitute,
- PossibleManifestFormats: options.PossibleManifestFormats,
- RequiredCompression: options.RequiredCompression,
- })
- for _, candidate := range candidates {
- var candidateRepo reference.Named
- if !candidate.UnknownLocation {
- var err error
- candidateRepo, err = parseBICLocationReference(candidate.Location)
- if err != nil {
- logrus.Debugf("Error parsing BlobInfoCache location reference: %s", err)
- continue
- }
- if candidate.CompressionAlgorithm != nil {
- logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s in destination repo %s", candidate.Digest.String(), candidate.CompressionAlgorithm.Name(), candidateRepo.Name())
- } else {
- logrus.Debugf("Trying to reuse blob with cached digest %s in destination repo %s", candidate.Digest.String(), candidateRepo.Name())
- }
- // Sanity checks:
- if reference.Domain(candidateRepo) != reference.Domain(d.ref.ref) {
- // OCI distribution spec 1.1 allows mounting blobs without specifying the source repo
- // (the "from" parameter); in that case we might try to use these candidates as well.
- //
- // OTOH that would mean we can’t do the “blobExists” check, and if there is no match
- // we could get an upload request that we would have to cancel.
- logrus.Debugf("... Internal error: domain %s does not match destination %s", reference.Domain(candidateRepo), reference.Domain(d.ref.ref))
- continue
- }
- } else {
- if candidate.CompressionAlgorithm != nil {
- logrus.Debugf("Trying to reuse blob with cached digest %s compressed with %s with no location match, checking current repo", candidate.Digest.String(), candidate.CompressionAlgorithm.Name())
- } else {
- logrus.Debugf("Trying to reuse blob with cached digest %s in destination repo with no location match, checking current repo", candidate.Digest.String())
- }
- // This digest is a known variant of this blob but we don’t
- // have a recorded location in this registry, let’s try looking
- // for it in the current repo.
- candidateRepo = reference.TrimNamed(d.ref.ref)
- }
- if originalCandidateKnownToBeMissing &&
- candidateRepo.Name() == d.ref.ref.Name() && candidate.Digest == info.Digest {
- logrus.Debug("... Already tried the primary destination")
- continue
- }
-
- // Whatever happens here, don't abort the entire operation. It's likely we just don't have permissions, and if it is a critical network error, we will find out soon enough anyway.
-
- // Checking candidateRepo, and mounting from it, requires an
- // expanded token scope.
- extraScope := &authScope{
- resourceType: "repository",
- remoteName: reference.Path(candidateRepo),
- actions: "pull",
- }
- // This existence check is not, strictly speaking, necessary: We only _really_ need it to get the blob size, and we could record that in the cache instead.
- // But a "failed" d.mountBlob currently leaves around an unterminated server-side upload, which we would try to cancel. - // So, without this existence check, it would be 1 request on success, 2 requests on failure; with it, it is 2 requests on success, 1 request on failure. - // On success we avoid the actual costly upload; so, in a sense, the success case is "free", but failures are always costly. - // Even worse, docker/distribution does not actually reasonably implement canceling uploads - // (it would require a "delete" action in the token, and Quay does not give that to anyone, so we can't ask); - // so, be a nice client and don't create unnecessary upload sessions on the server. - exists, size, err := d.blobExists(ctx, candidateRepo, candidate.Digest, extraScope) - if err != nil { - logrus.Debugf("... Failed: %v", err) - continue - } - if !exists { - // FIXME? Should we drop the blob from cache here (and elsewhere?)? - continue // logrus.Debug() already happened in blobExists - } - if candidateRepo.Name() != d.ref.ref.Name() { - if err := d.mountBlob(ctx, candidateRepo, candidate.Digest, extraScope); err != nil { - logrus.Debugf("... Mount failed: %v", err) - continue - } - } - - options.Cache.RecordKnownLocation(d.ref.Transport(), bicTransportScope(d.ref), candidate.Digest, newBICLocationReference(d.ref)) - - return true, private.ReusedBlob{ - Digest: candidate.Digest, - Size: size, - CompressionOperation: candidate.CompressionOperation, - CompressionAlgorithm: candidate.CompressionAlgorithm, - CompressionAnnotations: candidate.CompressionAnnotations, - }, nil - } - - return false, private.ReusedBlob{}, nil -} - -// PutManifest writes manifest to the destination. -// When the primary manifest is a manifest list, if instanceDigest is nil, we're saving the list -// itself, else instanceDigest contains a digest of the specific manifest instance to overwrite the -// manifest for; when the primary manifest is not a manifest list, instanceDigest should always be nil. -// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. -// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), -// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. -func (d *dockerImageDestination) PutManifest(ctx context.Context, m []byte, instanceDigest *digest.Digest) error { - var refTail string - // If d.ref.isUnknownDigest=true, then we push without a tag, so get the - // digest that will be used - if d.ref.isUnknownDigest { - digest, err := manifest.Digest(m) - if err != nil { - return err - } - refTail = digest.String() - } else if instanceDigest != nil { - // If the instanceDigest is provided, then use it as the refTail, because the reference, - // whether it includes a tag or a digest, refers to the list as a whole, and not this - // particular instance. - refTail = instanceDigest.String() - // Double-check that the manifest we've been given matches the digest we've been given. - // This also validates the format of instanceDigest. 
- matches, err := manifest.MatchesDigest(m, *instanceDigest) - if err != nil { - return fmt.Errorf("digesting manifest in PutManifest: %w", err) - } - if !matches { - manifestDigest, merr := manifest.Digest(m) - if merr != nil { - return fmt.Errorf("Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest: %w", instanceDigest.String(), merr) - } - return fmt.Errorf("Attempted to PutManifest using an explicitly specified digest (%q) that didn't match the manifest's digest (%q)", instanceDigest.String(), manifestDigest.String()) - } - } else { - // Compute the digest of the main manifest, or the list if it's a list, so that we - // have a digest value to use if we're asked to save a signature for the manifest. - digest, err := manifest.Digest(m) - if err != nil { - return err - } - d.manifestDigest = digest - // The refTail should be either a digest (which we expect to match the value we just - // computed) or a tag name. - refTail, err = d.ref.tagOrDigest() - if err != nil { - return err - } - } - - return d.uploadManifest(ctx, m, refTail) -} - -// uploadManifest writes manifest to tagOrDigest. -func (d *dockerImageDestination) uploadManifest(ctx context.Context, m []byte, tagOrDigest string) error { - path := fmt.Sprintf(manifestPath, reference.Path(d.ref.ref), tagOrDigest) - - headers := map[string][]string{} - mimeType := manifest.GuessMIMEType(m) - if mimeType != "" { - headers["Content-Type"] = []string{mimeType} - } - res, err := d.c.makeRequest(ctx, http.MethodPut, path, headers, bytes.NewReader(m), v2Auth, nil) - if err != nil { - return err - } - defer res.Body.Close() - if !successStatus(res.StatusCode) { - rawErr := registryHTTPResponseToError(res) - err := fmt.Errorf("uploading manifest %s to %s: %w", tagOrDigest, d.ref.ref.Name(), rawErr) - if isManifestInvalidError(rawErr) { - err = types.ManifestTypeRejectedError{Err: err} - } - return err - } - // A HTTP server may not be a registry at all, and just return 200 OK to everything - // (in particular that can fairly easily happen after tearing down a website and - // replacing it with a global 302 redirect to a new website, completely ignoring the - // path in the request); in that case we could “succeed” uploading a whole image. - // With docker/distribution we could rely on a Docker-Content-Digest header being present - // (because docker/distribution/registry/client has been failing uploads if it was missing), - // but that has been defined as explicitly optional by - // https://github.com/opencontainers/distribution-spec/blob/ec90a2af85fe4d612cf801e1815b95bfa40ae72b/spec.md#legacy-docker-support-http-headers - // So, just note the missing header in a debug log. - if v := res.Header.Values("Docker-Content-Digest"); len(v) == 0 { - logrus.Debugf("Manifest upload response didn’t contain a Docker-Content-Digest header, it might not be a container registry") - } - return nil -} - -// successStatus returns true if the argument is a successful HTTP response -// code (in the range 200 - 399 inclusive). -func successStatus(status int) bool { - return status >= 200 && status <= 399 -} - -// isManifestInvalidError returns true iff err from registryHTTPResponseToError is a “manifest invalid” error. -func isManifestInvalidError(err error) bool { - var ec errcode.ErrorCoder - if ok := errors.As(err, &ec); !ok { - return false - } - - switch ec.ErrorCode() { - // ErrorCodeManifestInvalid is returned by OpenShift with acceptschema2=false. 
- case v2.ErrorCodeManifestInvalid: - return true - // ErrorCodeTagInvalid is returned by docker/distribution (at least as of commit ec87e9b6971d831f0eff752ddb54fb64693e51cd) - // when uploading to a tag (because it can’t find a matching tag inside the manifest) - case v2.ErrorCodeTagInvalid: - return true - // ErrorCodeUnsupported with 'Invalid JSON syntax' is returned by AWS ECR when - // uploading an OCI manifest that is (correctly, according to the spec) missing - // a top-level media type. See libpod issue #1719 - // FIXME: remove this case when ECR behavior is fixed - case errcode.ErrorCodeUnsupported: - return strings.Contains(err.Error(), "Invalid JSON syntax") - default: - return false - } -} - -// PutSignaturesWithFormat writes a set of signatures to the destination. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for -// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. -// MUST be called after PutManifest (signatures may reference manifest contents). -func (d *dockerImageDestination) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error { - if instanceDigest == nil { - if d.manifestDigest == "" { - // This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures - return errors.New("Unknown manifest digest, can't add signatures") - } - instanceDigest = &d.manifestDigest - } - - sigstoreSignatures := []signature.Sigstore{} - otherSignatures := []signature.Signature{} - for _, sig := range signatures { - if sigstoreSig, ok := sig.(signature.Sigstore); ok { - sigstoreSignatures = append(sigstoreSignatures, sigstoreSig) - } else { - otherSignatures = append(otherSignatures, sig) - } - } - - // Only write sigstores signatures to sigstores attachments. We _could_ store them to lookaside - // instead, but that would probably be rather surprising. - // FIXME: So should we enable sigstores in all cases? Or write in all cases, but opt-in to read? - - if len(sigstoreSignatures) != 0 { - if err := d.putSignaturesToSigstoreAttachments(ctx, sigstoreSignatures, *instanceDigest); err != nil { - return err - } - } - - if len(otherSignatures) != 0 { - if err := d.c.detectProperties(ctx); err != nil { - return err - } - switch { - case d.c.supportsSignatures: - if err := d.putSignaturesToAPIExtension(ctx, otherSignatures, *instanceDigest); err != nil { - return err - } - case d.c.signatureBase != nil: - if err := d.putSignaturesToLookaside(otherSignatures, *instanceDigest); err != nil { - return err - } - default: - return errors.New("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration") - } - } - - return nil -} - -// putSignaturesToLookaside implements PutSignaturesWithFormat() from the lookaside location configured in s.c.signatureBase, -// which is not nil, for a manifest with manifestDigest. -func (d *dockerImageDestination) putSignaturesToLookaside(signatures []signature.Signature, manifestDigest digest.Digest) error { - // FIXME? This overwrites files one at a time, definitely not atomic. - // A failure when updating signatures with a reordered copy could lose some of them. - - // Skip dealing with the manifest digest if not necessary. - if len(signatures) == 0 { - return nil - } - - // NOTE: Keep this in sync with docs/signature-protocols.md! 
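`PutSignaturesWithFormat` above first partitions the incoming signatures by format, because the two kinds travel by different channels: sigstore payloads become OCI attachment layers, everything else goes to the lookaside or the API extension. The partitioning itself is trivial; with a stand-in type:

```go
package main

import "fmt"

// signature is a stand-in for the vendored signature.Signature interface;
// a plain tagged struct is enough to show the split performed above.
type signature struct {
	format string // "sigstore" or "simple-signing"
}

// splitByFormat partitions signatures the way PutSignaturesWithFormat does:
// sigstore ones go to OCI attachments, the rest to lookaside/API extension.
func splitByFormat(sigs []signature) (sigstore, other []signature) {
	for _, s := range sigs {
		if s.format == "sigstore" {
			sigstore = append(sigstore, s)
		} else {
			other = append(other, s)
		}
	}
	return
}

func main() {
	sigs := []signature{{format: "sigstore"}, {format: "simple-signing"}}
	a, b := splitByFormat(sigs)
	fmt.Println(len(a), len(b)) // 1 1
}
```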
- for i, signature := range signatures { - sigURL, err := lookasideStorageURL(d.c.signatureBase, manifestDigest, i) - if err != nil { - return err - } - if err := d.putOneSignature(sigURL, signature); err != nil { - return err - } - } - // Remove any other signatures, if present. - // We stop at the first missing signature; if a previous deleting loop aborted - // prematurely, this may not clean up all of them, but one missing signature - // is enough for dockerImageSource to stop looking for other signatures, so that - // is sufficient. - for i := len(signatures); ; i++ { - sigURL, err := lookasideStorageURL(d.c.signatureBase, manifestDigest, i) - if err != nil { - return err - } - missing, err := d.c.deleteOneSignature(sigURL) - if err != nil { - return err - } - if missing { - break - } - } - - return nil -} - -// putOneSignature stores sig to sigURL. -// NOTE: Keep this in sync with docs/signature-protocols.md! -func (d *dockerImageDestination) putOneSignature(sigURL *url.URL, sig signature.Signature) error { - switch sigURL.Scheme { - case "file": - logrus.Debugf("Writing to %s", sigURL.Path) - err := os.MkdirAll(filepath.Dir(sigURL.Path), 0755) - if err != nil { - return err - } - blob, err := signature.Blob(sig) - if err != nil { - return err - } - err = os.WriteFile(sigURL.Path, blob, 0644) - if err != nil { - return err - } - return nil - - case "http", "https": - return fmt.Errorf("Writing directly to a %s lookaside %s is not supported. Configure a lookaside-staging: location", sigURL.Scheme, sigURL.Redacted()) - default: - return fmt.Errorf("Unsupported scheme when writing signature to %s", sigURL.Redacted()) - } -} - -func (d *dockerImageDestination) putSignaturesToSigstoreAttachments(ctx context.Context, signatures []signature.Sigstore, manifestDigest digest.Digest) error { - if !d.c.useSigstoreAttachments { - return errors.New("writing sigstore attachments is disabled by configuration") - } - - ociManifest, err := d.c.getSigstoreAttachmentManifest(ctx, d.ref, manifestDigest) - if err != nil { - return err - } - var ociConfig imgspecv1.Image // Most fields empty by default - if ociManifest == nil { - ociManifest = manifest.OCI1FromComponents(imgspecv1.Descriptor{ - MediaType: imgspecv1.MediaTypeImageConfig, - Digest: "", // We will fill this in later. - Size: 0, - }, nil) - ociConfig.RootFS.Type = "layers" - } else { - logrus.Debugf("Fetching sigstore attachment config %s", ociManifest.Config.Digest.String()) - // We don’t benefit from a real BlobInfoCache here because we never try to reuse/mount configs. - configBlob, err := d.c.getOCIDescriptorContents(ctx, d.ref, ociManifest.Config, iolimits.MaxConfigBodySize, - none.NoCache) - if err != nil { - return err - } - if err := json.Unmarshal(configBlob, &ociConfig); err != nil { - return fmt.Errorf("parsing sigstore attachment config %s in %s: %w", ociManifest.Config.Digest.String(), - d.ref.ref.Name(), err) - } - } - - // To make sure we can safely append to the slices of ociManifest, without adding a remote dependency on the code that creates it. - ociManifest.Layers = slices.Clone(ociManifest.Layers) - // We don’t need to ^^^ for ociConfig.RootFS.DiffIDs because we have created it empty ourselves, and json.Unmarshal is documented to append() to - // the slice in the original object (or in a newly allocated object). 
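The cleanup loop above relies on readers stopping at the first missing lookaside slot: after writing n signatures it deletes slot n, n+1, … until a probe reports "missing". Abstracted over a hypothetical `deleteOne` callback:

```go
package main

import "fmt"

// pruneStale illustrates the cleanup loop above: after writing n
// signatures, probe slots n, n+1, ... and delete until one is missing.
// deleteOne reports missing=true when the slot did not exist.
func pruneStale(n int, deleteOne func(i int) (missing bool, err error)) error {
	for i := n; ; i++ {
		missing, err := deleteOne(i)
		if err != nil {
			return err
		}
		if missing {
			return nil // readers stop at the first gap, so this is enough
		}
	}
}

func main() {
	stored := map[int]bool{0: true, 1: true, 2: true, 3: true}
	err := pruneStale(2, func(i int) (bool, error) {
		if !stored[i] {
			return true, nil
		}
		delete(stored, i)
		return false, nil
	})
	fmt.Println(err, stored) // <nil> map[0:true 1:true]
}
```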
- for _, sig := range signatures { - mimeType := sig.UntrustedMIMEType() - payloadBlob := sig.UntrustedPayload() - annotations := sig.UntrustedAnnotations() - - alreadyOnRegistry := false - for _, layer := range ociManifest.Layers { - if layerMatchesSigstoreSignature(layer, mimeType, payloadBlob, annotations) { - logrus.Debugf("Signature with digest %s already exists on the registry", layer.Digest.String()) - alreadyOnRegistry = true - break - } - } - if alreadyOnRegistry { - continue - } - - // We don’t benefit from a real BlobInfoCache here because we never try to reuse/mount attachment payloads. - // That might eventually need to change if payloads grow to be not just signatures, but something - // significantly large. - sigDesc, err := d.putBlobBytesAsOCI(ctx, payloadBlob, mimeType, private.PutBlobOptions{ - Cache: none.NoCache, - IsConfig: false, - EmptyLayer: false, - LayerIndex: nil, - }) - if err != nil { - return err - } - sigDesc.Annotations = annotations - ociManifest.Layers = append(ociManifest.Layers, sigDesc) - ociConfig.RootFS.DiffIDs = append(ociConfig.RootFS.DiffIDs, sigDesc.Digest) - logrus.Debugf("Adding new signature, digest %s", sigDesc.Digest.String()) - } - - configBlob, err := json.Marshal(ociConfig) - if err != nil { - return err - } - logrus.Debugf("Uploading updated sigstore attachment config") - // We don’t benefit from a real BlobInfoCache here because we never try to reuse/mount configs. - configDesc, err := d.putBlobBytesAsOCI(ctx, configBlob, imgspecv1.MediaTypeImageConfig, private.PutBlobOptions{ - Cache: none.NoCache, - IsConfig: true, - EmptyLayer: false, - LayerIndex: nil, - }) - if err != nil { - return err - } - ociManifest.Config = configDesc - - manifestBlob, err := ociManifest.Serialize() - if err != nil { - return err - } - attachmentTag, err := sigstoreAttachmentTag(manifestDigest) - if err != nil { - return err - } - logrus.Debugf("Uploading sigstore attachment manifest") - return d.uploadManifest(ctx, manifestBlob, attachmentTag) -} - -func layerMatchesSigstoreSignature(layer imgspecv1.Descriptor, mimeType string, - payloadBlob []byte, annotations map[string]string) bool { - if layer.MediaType != mimeType || - layer.Size != int64(len(payloadBlob)) || - // This is not quite correct, we should use the layer’s digest algorithm. - // But right now we don’t want to deal with corner cases like bad digest formats - // or unavailable algorithms; in the worst case we end up with duplicate signature - // entries. - layer.Digest.String() != digest.FromBytes(payloadBlob).String() || - !maps.Equal(layer.Annotations, annotations) { - return false - } - return true -} - -// putBlobBytesAsOCI uploads a blob with the specified contents, and returns an appropriate -// OCI descriptor. -func (d *dockerImageDestination) putBlobBytesAsOCI(ctx context.Context, contents []byte, mimeType string, options private.PutBlobOptions) (imgspecv1.Descriptor, error) { - blobDigest := digest.FromBytes(contents) - info, err := d.PutBlobWithOptions(ctx, bytes.NewReader(contents), - types.BlobInfo{ - Digest: blobDigest, - Size: int64(len(contents)), - MediaType: mimeType, - }, options) - if err != nil { - return imgspecv1.Descriptor{}, fmt.Errorf("writing blob %s: %w", blobDigest.String(), err) - } - return imgspecv1.Descriptor{ - MediaType: mimeType, - Digest: info.Digest, - Size: info.Size, - }, nil -} - -// deleteOneSignature deletes a signature from sigURL, if it exists. 
-// If it successfully determines that the signature does not exist, returns (true, nil) -// NOTE: Keep this in sync with docs/signature-protocols.md! -func (c *dockerClient) deleteOneSignature(sigURL *url.URL) (missing bool, err error) { - switch sigURL.Scheme { - case "file": - logrus.Debugf("Deleting %s", sigURL.Path) - err := os.Remove(sigURL.Path) - if err != nil && os.IsNotExist(err) { - return true, nil - } - return false, err - - case "http", "https": - return false, fmt.Errorf("Writing directly to a %s lookaside %s is not supported. Configure a lookaside-staging: location", sigURL.Scheme, sigURL.Redacted()) - default: - return false, fmt.Errorf("Unsupported scheme when deleting signature from %s", sigURL.Redacted()) - } -} - -// putSignaturesToAPIExtension implements PutSignaturesWithFormat() using the X-Registry-Supports-Signatures API extension, -// for a manifest with manifestDigest. -func (d *dockerImageDestination) putSignaturesToAPIExtension(ctx context.Context, signatures []signature.Signature, manifestDigest digest.Digest) error { - // Skip dealing with the manifest digest, or reading the old state, if not necessary. - if len(signatures) == 0 { - return nil - } - - // Because image signatures are a shared resource in Atomic Registry, the default upload - // always adds signatures. Eventually we should also allow removing signatures, - // but the X-Registry-Supports-Signatures API extension does not support that yet. - - existingSignatures, err := d.c.getExtensionsSignatures(ctx, d.ref, manifestDigest) - if err != nil { - return err - } - existingSigNames := set.New[string]() - for _, sig := range existingSignatures.Signatures { - existingSigNames.Add(sig.Name) - } - - for _, newSigWithFormat := range signatures { - newSigSimple, ok := newSigWithFormat.(signature.SimpleSigning) - if !ok { - return signature.UnsupportedFormatError(newSigWithFormat) - } - newSig := newSigSimple.UntrustedSignature() - - if slices.ContainsFunc(existingSignatures.Signatures, func(existingSig extensionSignature) bool { - return existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) - }) { - continue - } - - // The API expect us to invent a new unique name. This is racy, but hopefully good enough. - var signatureName string - for { - randBytes := make([]byte, 16) - n, err := rand.Read(randBytes) - if err != nil || n != 16 { - return fmt.Errorf("generating random signature len %d: %w", n, err) - } - signatureName = fmt.Sprintf("%s@%032x", manifestDigest.String(), randBytes) - if !existingSigNames.Contains(signatureName) { - break - } - } - sig := extensionSignature{ - Version: extensionSignatureSchemaVersion, - Name: signatureName, - Type: extensionSignatureTypeAtomic, - Content: newSig, - } - body, err := json.Marshal(sig) - if err != nil { - return err - } - - // manifestDigest is known to be valid because it was not rejected by getExtensionsSignatures above. 
- path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), manifestDigest.String()) - res, err := d.c.makeRequest(ctx, http.MethodPut, path, nil, bytes.NewReader(body), v2Auth, nil) - if err != nil { - return err - } - defer res.Body.Close() - if res.StatusCode != http.StatusCreated { - logrus.Debugf("Error uploading signature, status %d, %#v", res.StatusCode, res) - return fmt.Errorf("uploading signature to %s in %s: %w", path, d.c.registry, registryHTTPResponseToError(res)) - } - } - - return nil -} - -// CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted. -// WARNING: This does not have any transactional semantics: -// - Uploaded data MAY be visible to others before CommitWithOptions() is called -// - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed) -func (d *dockerImageDestination) CommitWithOptions(ctx context.Context, options private.CommitOptions) error { - return nil -} diff --git a/vendor/github.com/containers/image/v5/docker/docker_image_src.go b/vendor/github.com/containers/image/v5/docker/docker_image_src.go deleted file mode 100644 index 4eb9cdfba5..0000000000 --- a/vendor/github.com/containers/image/v5/docker/docker_image_src.go +++ /dev/null @@ -1,863 +0,0 @@ -package docker - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "fmt" - "io" - "math" - "mime" - "mime/multipart" - "net/http" - "net/url" - "os" - "os/exec" - "strings" - "sync" - - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/internal/imagesource/impl" - "github.com/containers/image/v5/internal/imagesource/stubs" - "github.com/containers/image/v5/internal/iolimits" - "github.com/containers/image/v5/internal/private" - "github.com/containers/image/v5/internal/signature" - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/pkg/blobinfocache/none" - "github.com/containers/image/v5/pkg/sysregistriesv2" - "github.com/containers/image/v5/types" - "github.com/containers/storage/pkg/regexp" - digest "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" -) - -// maxLookasideSignatures is an arbitrary limit for the total number of signatures we would try to read from a lookaside server, -// even if it were broken or malicious and it continued serving an enormous number of items. -const maxLookasideSignatures = 128 - -type dockerImageSource struct { - impl.Compat - impl.PropertyMethodsInitialize - impl.DoesNotAffectLayerInfosForCopy - stubs.ImplementsGetBlobAt - - logicalRef dockerReference // The reference the user requested. This must satisfy !isUnknownDigest - physicalRef dockerReference // The actual reference we are accessing (possibly a mirror). This must satisfy !isUnknownDigest - c *dockerClient - // State - cachedManifest []byte // nil if not loaded yet - cachedManifestMIMEType string // Only valid if cachedManifest != nil -} - -// newImageSource creates a new ImageSource for the specified image reference. -// The caller must call .Close() on the returned ImageSource. -// The caller must ensure !ref.isUnknownDigest. 
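
// Editorial sketch, not from the original file: the mirror fan-out performed by
// newImageSource below, reduced to the two sysregistriesv2 calls it relies on.
// Mirrors are tried in configuration order, and the upstream location comes last.
func listPullSources(sys *types.SystemContext, name reference.Named) error {
	registry, err := sysregistriesv2.FindRegistry(sys, name.Name())
	if err != nil {
		return err
	}
	if registry == nil {
		fmt.Println("no configuration: only the upstream location would be tried")
		return nil
	}
	sources, err := registry.PullSourcesFromReference(name)
	if err != nil {
		return err
	}
	for _, s := range sources {
		fmt.Printf("would try %s (insecure=%v)\n", s.Reference, s.Endpoint.Insecure)
	}
	return nil
}
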
-func newImageSource(ctx context.Context, sys *types.SystemContext, ref dockerReference) (*dockerImageSource, error) { - if ref.isUnknownDigest { - return nil, fmt.Errorf("reading images from docker: reference %q without a tag or digest is not supported", ref.StringWithinTransport()) - } - - registryConfig, err := loadRegistryConfiguration(sys) - if err != nil { - return nil, err - } - registry, err := sysregistriesv2.FindRegistry(sys, ref.ref.Name()) - if err != nil { - return nil, fmt.Errorf("loading registries configuration: %w", err) - } - if registry == nil { - // No configuration was found for the provided reference, so use the - // equivalent of a default configuration. - registry = &sysregistriesv2.Registry{ - Endpoint: sysregistriesv2.Endpoint{ - Location: ref.ref.String(), - }, - Prefix: ref.ref.String(), - } - } - - // Check all endpoints for the manifest availability. If we find one that does - // contain the image, it will be used for all future pull actions. Always try the - // non-mirror original location last; this both transparently handles the case - // of no mirrors configured, and ensures we return the error encountered when - // accessing the upstream location if all endpoints fail. - pullSources, err := registry.PullSourcesFromReference(ref.ref) - if err != nil { - return nil, err - } - type attempt struct { - ref reference.Named - err error - } - attempts := []attempt{} - for _, pullSource := range pullSources { - if sys != nil && sys.DockerLogMirrorChoice { - logrus.Infof("Trying to access %q", pullSource.Reference) - } else { - logrus.Debugf("Trying to access %q", pullSource.Reference) - } - s, err := newImageSourceAttempt(ctx, sys, ref, pullSource, registryConfig) - if err == nil { - return s, nil - } - logrus.Debugf("Accessing %q failed: %v", pullSource.Reference, err) - attempts = append(attempts, attempt{ - ref: pullSource.Reference, - err: err, - }) - } - switch len(attempts) { - case 0: - return nil, errors.New("Internal error: newImageSource returned without trying any endpoint") - case 1: - return nil, attempts[0].err // If no mirrors are used, perfectly preserve the error type and add no noise. - default: - // Don’t just build a string, try to preserve the typed error. - primary := &attempts[len(attempts)-1] - extras := []string{} - for _, attempt := range attempts[:len(attempts)-1] { - // This is difficult to fit into a single-line string, when the error can contain arbitrary strings including any metacharacters we decide to use. - // The paired [] at least have some chance of being unambiguous. - extras = append(extras, fmt.Sprintf("[%s: %v]", attempt.ref.String(), attempt.err)) - } - return nil, fmt.Errorf("(Mirrors also failed: %s): %s: %w", strings.Join(extras, "\n"), primary.ref.String(), primary.err) - } -} - -// newImageSourceAttempt is an internal helper for newImageSource. Everyone else must call newImageSource. -// Given a logicalReference and a pullSource, return a dockerImageSource if it is reachable. -// The caller must call .Close() on the returned ImageSource. 
-func newImageSourceAttempt(ctx context.Context, sys *types.SystemContext, logicalRef dockerReference, pullSource sysregistriesv2.PullSource,
-	registryConfig *registryConfiguration) (*dockerImageSource, error) {
-	physicalRef, err := newReference(pullSource.Reference, false)
-	if err != nil {
-		return nil, err
-	}
-
-	endpointSys := sys
-	// sys.DockerAuthConfig does not explicitly specify a registry; we must not blindly send the credentials intended for the primary endpoint to mirrors.
-	if endpointSys != nil && endpointSys.DockerAuthConfig != nil && reference.Domain(physicalRef.ref) != reference.Domain(logicalRef.ref) {
-		copy := *endpointSys
-		copy.DockerAuthConfig = nil
-		copy.DockerBearerRegistryToken = ""
-		endpointSys = &copy
-	}
-
-	client, err := newDockerClientFromRef(endpointSys, physicalRef, registryConfig, false, "pull")
-	if err != nil {
-		return nil, err
-	}
-	client.tlsClientConfig.InsecureSkipVerify = pullSource.Endpoint.Insecure
-
-	s := &dockerImageSource{
-		PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
-			HasThreadSafeGetBlob: true,
-		}),
-
-		logicalRef:  logicalRef,
-		physicalRef: physicalRef,
-		c:           client,
-	}
-	s.Compat = impl.AddCompat(s)
-
-	if err := s.ensureManifestIsLoaded(ctx); err != nil {
-		client.Close()
-		return nil, err
-	}
-
-	if h, err := sysregistriesv2.AdditionalLayerStoreAuthHelper(endpointSys); err == nil && h != "" {
-		acf := map[string]struct {
-			Username      string `json:"username,omitempty"`
-			Password      string `json:"password,omitempty"`
-			IdentityToken string `json:"identityToken,omitempty"`
-		}{
-			physicalRef.ref.String(): {
-				Username:      client.auth.Username,
-				Password:      client.auth.Password,
-				IdentityToken: client.auth.IdentityToken,
-			},
-		}
-		acfD, err := json.Marshal(acf)
-		if err != nil {
-			logrus.Warnf("failed to marshal auth config: %v", err)
-		} else {
-			cmd := exec.Command(h)
-			cmd.Stdin = bytes.NewReader(acfD)
-			if err := cmd.Run(); err != nil {
-				var stderr string
-				if ee, ok := err.(*exec.ExitError); ok {
-					stderr = string(ee.Stderr)
-				}
-				logrus.Warnf("Failed to call additional-layer-store-auth-helper (stderr:%s): %v", stderr, err)
-			}
-		}
-	}
-	return s, nil
-}
-
-// Reference returns the reference used to set up this source, _as specified by the user_
-// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
-func (s *dockerImageSource) Reference() types.ImageReference {
-	return s.logicalRef
-}
-
-// Close removes resources associated with an initialized ImageSource, if any.
-func (s *dockerImageSource) Close() error {
-	return s.c.Close()
-}
-
-// simplifyContentType drops parameters from a HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1)
-// Alternatively, an empty string is returned unchanged, and invalid values are "simplified" to an empty string.
-func simplifyContentType(contentType string) string {
-	if contentType == "" {
-		return contentType
-	}
-	mimeType, _, err := mime.ParseMediaType(contentType)
-	if err != nil {
-		return ""
-	}
-	return mimeType
-}
-
-// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
-// It may use a remote (= slow) service.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
-// this never happens if the primary manifest is not a manifest list (e.g.
if the source never returns manifest lists). -func (s *dockerImageSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) { - if instanceDigest != nil { - if err := instanceDigest.Validate(); err != nil { // Make sure instanceDigest.String() does not contain any unexpected characters - return nil, "", err - } - return s.fetchManifest(ctx, instanceDigest.String()) - } - err := s.ensureManifestIsLoaded(ctx) - if err != nil { - return nil, "", err - } - return s.cachedManifest, s.cachedManifestMIMEType, nil -} - -// fetchManifest fetches a manifest for tagOrDigest. -// The caller is responsible for ensuring tagOrDigest uses the expected format. -func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest string) ([]byte, string, error) { - return s.c.fetchManifest(ctx, s.physicalRef, tagOrDigest) -} - -// ensureManifestIsLoaded sets s.cachedManifest and s.cachedManifestMIMEType -// -// ImageSource implementations are not required or expected to do any caching, -// but because our signatures are “attached” to the manifest digest, -// we need to ensure that the digest of the manifest returned by GetManifest(ctx, nil) -// and used by GetSignatures(ctx, nil) are consistent, otherwise we would get spurious -// signature verification failures when pulling while a tag is being updated. -func (s *dockerImageSource) ensureManifestIsLoaded(ctx context.Context) error { - if s.cachedManifest != nil { - return nil - } - - reference, err := s.physicalRef.tagOrDigest() - if err != nil { - return err - } - - manblob, mt, err := s.fetchManifest(ctx, reference) - if err != nil { - return err - } - // We might validate manblob against the Docker-Content-Digest header here to protect against transport errors. - s.cachedManifest = manblob - s.cachedManifestMIMEType = mt - return nil -} - -// splitHTTP200ResponseToPartial splits a 200 response in multiple streams as specified by the chunks -func splitHTTP200ResponseToPartial(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []private.ImageSourceChunk) { - defer close(streams) - defer close(errs) - currentOffset := uint64(0) - - body = makeBufferedNetworkReader(body, 64, 16384) - defer body.Close() - for _, c := range chunks { - if c.Offset != currentOffset { - if c.Offset < currentOffset { - errs <- fmt.Errorf("invalid chunk offset specified %v (expected >= %v)", c.Offset, currentOffset) - break - } - toSkip := c.Offset - currentOffset - if _, err := io.Copy(io.Discard, io.LimitReader(body, int64(toSkip))); err != nil { - errs <- err - break - } - currentOffset += toSkip - } - var reader io.Reader - if c.Length == math.MaxUint64 { - reader = body - } else { - reader = io.LimitReader(body, int64(c.Length)) - } - s := signalCloseReader{ - closed: make(chan struct{}), - stream: io.NopCloser(reader), - consumeStream: true, - } - streams <- s - - // Wait until the stream is closed before going to the next chunk - <-s.closed - currentOffset += c.Length - } -} - -// handle206Response reads a 206 response and send each part as a separate ReadCloser to the streams chan. 
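
// Editorial illustration, not from the original file: the stdlib primitives that
// handle206Response below builds on. A multipart 206 response carries one body part
// per requested range, and each part must be fully consumed before the next
// NextPart call; this sketch only discards the bytes.
func readPartsSketch(res *http.Response) error {
	mediaType, params, err := mime.ParseMediaType(res.Header.Get("Content-Type"))
	if err != nil {
		return err
	}
	if !strings.HasPrefix(mediaType, "multipart/") {
		// A single-range 206 carries the bytes directly in the body.
		_, err := io.Copy(io.Discard, res.Body)
		return err
	}
	mr := multipart.NewReader(res.Body, params["boundary"])
	for {
		p, err := mr.NextPart()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		if _, err := io.Copy(io.Discard, p); err != nil { // a real consumer would use the bytes
			return err
		}
	}
}
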
-func handle206Response(streams chan io.ReadCloser, errs chan error, body io.ReadCloser, chunks []private.ImageSourceChunk, mediaType string, params map[string]string) { - defer close(streams) - defer close(errs) - if !strings.HasPrefix(mediaType, "multipart/") { - streams <- body - return - } - boundary, found := params["boundary"] - if !found { - errs <- errors.New("could not find boundary") - body.Close() - return - } - buffered := makeBufferedNetworkReader(body, 64, 16384) - defer buffered.Close() - mr := multipart.NewReader(buffered, boundary) - parts := 0 - for { - p, err := mr.NextPart() - if err != nil { - if err != io.EOF { - errs <- err - } - if parts != len(chunks) { - errs <- errors.New("invalid number of chunks returned by the server") - } - return - } - if parts >= len(chunks) { - errs <- errors.New("too many parts returned by the server") - break - } - s := signalCloseReader{ - closed: make(chan struct{}), - stream: p, - } - streams <- s - // NextPart() cannot be called while the current part - // is being read, so wait until it is closed - <-s.closed - parts++ - } -} - -var multipartByteRangesRe = regexp.Delayed("multipart/byteranges; boundary=([A-Za-z-0-9:]+)") - -func parseMediaType(contentType string) (string, map[string]string, error) { - mediaType, params, err := mime.ParseMediaType(contentType) - if err != nil { - if err == mime.ErrInvalidMediaParameter { - // CloudFront returns an invalid MIME type, that contains an unquoted ":" in the boundary - // param, let's handle it here. - matches := multipartByteRangesRe.FindStringSubmatch(contentType) - if len(matches) == 2 { - mediaType = "multipart/byteranges" - params = map[string]string{ - "boundary": matches[1], - } - err = nil - } - } - if err != nil { - return "", nil, err - } - } - return mediaType, params, err -} - -// GetBlobAt returns a sequential channel of readers that contain data for the requested -// blob chunks, and a channel that might get a single error value. -// The specified chunks must be not overlapping and sorted by their offset. -// The readers must be fully consumed, in the order they are returned, before blocking -// to read the next chunk. -// If the Length for the last chunk is set to math.MaxUint64, then it -// fully fetches the remaining data from the offset to the end of the blob. -func (s *dockerImageSource) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) { - headers := make(map[string][]string) - - rangeVals := make([]string, 0, len(chunks)) - lastFound := false - for _, c := range chunks { - if lastFound { - return nil, nil, fmt.Errorf("internal error: another chunk requested after an util-EOF chunk") - } - // If the Length is set to -1, then request anything after the specified offset. 
- if c.Length == math.MaxUint64 { - lastFound = true - rangeVals = append(rangeVals, fmt.Sprintf("%d-", c.Offset)) - } else { - rangeVals = append(rangeVals, fmt.Sprintf("%d-%d", c.Offset, c.Offset+c.Length-1)) - } - } - - headers["Range"] = []string{fmt.Sprintf("bytes=%s", strings.Join(rangeVals, ","))} - - if len(info.URLs) != 0 { - return nil, nil, fmt.Errorf("external URLs not supported with GetBlobAt") - } - - if err := info.Digest.Validate(); err != nil { // Make sure info.Digest.String() does not contain any unexpected characters - return nil, nil, err - } - path := fmt.Sprintf(blobsPath, reference.Path(s.physicalRef.ref), info.Digest.String()) - logrus.Debugf("Downloading %s", path) - res, err := s.c.makeRequest(ctx, http.MethodGet, path, headers, nil, v2Auth, nil) - if err != nil { - return nil, nil, err - } - - switch res.StatusCode { - case http.StatusOK: - // if the server replied with a 200 status code, convert the full body response to a series of - // streams as it would have been done with 206. - streams := make(chan io.ReadCloser) - errs := make(chan error) - go splitHTTP200ResponseToPartial(streams, errs, res.Body, chunks) - return streams, errs, nil - case http.StatusPartialContent: - mediaType, params, err := parseMediaType(res.Header.Get("Content-Type")) - if err != nil { - return nil, nil, err - } - - streams := make(chan io.ReadCloser) - errs := make(chan error) - - go handle206Response(streams, errs, res.Body, chunks, mediaType, params) - return streams, errs, nil - case http.StatusBadRequest: - res.Body.Close() - return nil, nil, private.BadPartialRequestError{Status: res.Status} - default: - err := registryHTTPResponseToError(res) - res.Body.Close() - return nil, nil, fmt.Errorf("fetching partial blob: %w", err) - } -} - -// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). -// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. -// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. -func (s *dockerImageSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) { - return s.c.getBlob(ctx, s.physicalRef, info, cache) -} - -// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). 
-func (s *dockerImageSource) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) { - if err := s.c.detectProperties(ctx); err != nil { - return nil, err - } - var res []signature.Signature - switch { - case s.c.supportsSignatures: - if err := s.appendSignaturesFromAPIExtension(ctx, &res, instanceDigest); err != nil { - return nil, err - } - case s.c.signatureBase != nil: - if err := s.appendSignaturesFromLookaside(ctx, &res, instanceDigest); err != nil { - return nil, err - } - default: - return nil, errors.New("Internal error: X-Registry-Supports-Signatures extension not supported, and lookaside should not be empty configuration") - } - - if err := s.appendSignaturesFromSigstoreAttachments(ctx, &res, instanceDigest); err != nil { - return nil, err - } - return res, nil -} - -// manifestDigest returns a digest of the manifest, from instanceDigest if non-nil; or from the supplied reference, -// or finally, from a fetched manifest. -func (s *dockerImageSource) manifestDigest(ctx context.Context, instanceDigest *digest.Digest) (digest.Digest, error) { - if instanceDigest != nil { - return *instanceDigest, nil - } - if digested, ok := s.physicalRef.ref.(reference.Digested); ok { - d := digested.Digest() - if d.Algorithm() == digest.Canonical { - return d, nil - } - } - if err := s.ensureManifestIsLoaded(ctx); err != nil { - return "", err - } - return manifest.Digest(s.cachedManifest) -} - -// appendSignaturesFromLookaside implements GetSignaturesWithFormat() from the lookaside location configured in s.c.signatureBase, -// which is not nil, storing the signatures to *dest. -// On error, the contents of *dest are undefined. -func (s *dockerImageSource) appendSignaturesFromLookaside(ctx context.Context, dest *[]signature.Signature, instanceDigest *digest.Digest) error { - manifestDigest, err := s.manifestDigest(ctx, instanceDigest) - if err != nil { - return err - } - - // NOTE: Keep this in sync with docs/signature-protocols.md! - for i := 0; ; i++ { - if i >= maxLookasideSignatures { - return fmt.Errorf("server provided %d signatures, assuming that's unreasonable and a server error", maxLookasideSignatures) - } - - sigURL, err := lookasideStorageURL(s.c.signatureBase, manifestDigest, i) - if err != nil { - return err - } - signature, missing, err := s.getOneSignature(ctx, sigURL) - if err != nil { - return err - } - if missing { - break - } - *dest = append(*dest, signature) - } - return nil -} - -// getOneSignature downloads one signature from sigURL, and returns (signature, false, nil) -// If it successfully determines that the signature does not exist, returns (nil, true, nil). -// NOTE: Keep this in sync with docs/signature-protocols.md! 
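
// Editorial sketch, not from the original file: the lookaside layout that
// lookasideStorageURL (defined elsewhere in this package) is assumed to implement;
// the authoritative description is docs/signature-protocols.md. Signatures are
// numbered consecutively from 1 under a per-manifest path, roughly
// <base>@<algo>=<hex>/signature-<index+1>:
func lookasideURLSketch(base *url.URL, manifestDigest digest.Digest, index int) *url.URL {
	u := *base // shallow copy, so the caller's URL stays untouched
	u.Path = fmt.Sprintf("%s@%s=%s/signature-%d",
		base.Path, manifestDigest.Algorithm(), manifestDigest.Encoded(), index+1)
	return &u
}
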
-func (s *dockerImageSource) getOneSignature(ctx context.Context, sigURL *url.URL) (signature.Signature, bool, error) { - switch sigURL.Scheme { - case "file": - logrus.Debugf("Reading %s", sigURL.Path) - sigBlob, err := os.ReadFile(sigURL.Path) - if err != nil { - if os.IsNotExist(err) { - return nil, true, nil - } - return nil, false, err - } - sig, err := signature.FromBlob(sigBlob) - if err != nil { - return nil, false, fmt.Errorf("parsing signature %q: %w", sigURL.Path, err) - } - return sig, false, nil - - case "http", "https": - logrus.Debugf("GET %s", sigURL.Redacted()) - req, err := http.NewRequestWithContext(ctx, http.MethodGet, sigURL.String(), nil) - if err != nil { - return nil, false, err - } - res, err := s.c.client.Do(req) - if err != nil { - return nil, false, err - } - defer res.Body.Close() - if res.StatusCode == http.StatusNotFound { - logrus.Debugf("... got status 404, as expected = end of signatures") - return nil, true, nil - } else if res.StatusCode != http.StatusOK { - return nil, false, fmt.Errorf("reading signature from %s: %w", sigURL.Redacted(), newUnexpectedHTTPStatusError(res)) - } - - contentType := res.Header.Get("Content-Type") - if mimeType := simplifyContentType(contentType); mimeType == "text/html" { - logrus.Warnf("Signature %q has Content-Type %q, unexpected for a signature", sigURL.Redacted(), contentType) - // Don’t immediately fail; the lookaside spec does not place any requirements on Content-Type. - // If the content really is HTML, it’s going to fail in signature.FromBlob. - } - - sigBlob, err := iolimits.ReadAtMost(res.Body, iolimits.MaxSignatureBodySize) - if err != nil { - return nil, false, err - } - sig, err := signature.FromBlob(sigBlob) - if err != nil { - return nil, false, fmt.Errorf("parsing signature %s: %w", sigURL.Redacted(), err) - } - return sig, false, nil - - default: - return nil, false, fmt.Errorf("Unsupported scheme when reading signature from %s", sigURL.Redacted()) - } -} - -// appendSignaturesFromAPIExtension implements GetSignaturesWithFormat() using the X-Registry-Supports-Signatures API extension, -// storing the signatures to *dest. -// On error, the contents of *dest are undefined. -func (s *dockerImageSource) appendSignaturesFromAPIExtension(ctx context.Context, dest *[]signature.Signature, instanceDigest *digest.Digest) error { - manifestDigest, err := s.manifestDigest(ctx, instanceDigest) - if err != nil { - return err - } - - parsedBody, err := s.c.getExtensionsSignatures(ctx, s.physicalRef, manifestDigest) - if err != nil { - return err - } - - for _, sig := range parsedBody.Signatures { - if sig.Version == extensionSignatureSchemaVersion && sig.Type == extensionSignatureTypeAtomic { - *dest = append(*dest, signature.SimpleSigningFromBlob(sig.Content)) - } - } - return nil -} - -// appendSignaturesFromSigstoreAttachments implements GetSignaturesWithFormat() using the sigstore tag convention, -// storing the signatures to *dest. -// On error, the contents of *dest are undefined. 
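
// Editorial aside, not from the original file: every layer of an attachment
// manifest is one payload, and consumers filter on MIME type. The media type below
// is one common cosign simple-signing payload type, named here only for illustration:
func countSimpleSigningLayers(m *manifest.OCI1) int {
	n := 0
	for _, layer := range m.Layers {
		if layer.MediaType == "application/vnd.dev.cosign.simplesigning.v1+json" {
			n++
		}
	}
	return n
}
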
-func (s *dockerImageSource) appendSignaturesFromSigstoreAttachments(ctx context.Context, dest *[]signature.Signature, instanceDigest *digest.Digest) error { - if !s.c.useSigstoreAttachments { - logrus.Debugf("Not looking for sigstore attachments: disabled by configuration") - return nil - } - - manifestDigest, err := s.manifestDigest(ctx, instanceDigest) - if err != nil { - return err - } - - ociManifest, err := s.c.getSigstoreAttachmentManifest(ctx, s.physicalRef, manifestDigest) - if err != nil { - return err - } - if ociManifest == nil { - return nil - } - - logrus.Debugf("Found a sigstore attachment manifest with %d layers", len(ociManifest.Layers)) - for layerIndex, layer := range ociManifest.Layers { - // Note that this copies all kinds of attachments: attestations, and whatever else is there, - // not just signatures. We leave the signature consumers to decide based on the MIME type. - logrus.Debugf("Fetching sigstore attachment %d/%d: %s", layerIndex+1, len(ociManifest.Layers), layer.Digest.String()) - // We don’t benefit from a real BlobInfoCache here because we never try to reuse/mount attachment payloads. - // That might eventually need to change if payloads grow to be not just signatures, but something - // significantly large. - payload, err := s.c.getOCIDescriptorContents(ctx, s.physicalRef, layer, iolimits.MaxSignatureBodySize, - none.NoCache) - if err != nil { - return err - } - *dest = append(*dest, signature.SigstoreFromComponents(layer.MediaType, payload, layer.Annotations)) - } - return nil -} - -// deleteImage deletes the named image from the registry, if supported. -func deleteImage(ctx context.Context, sys *types.SystemContext, ref dockerReference) error { - if ref.isUnknownDigest { - return fmt.Errorf("Docker reference without a tag or digest cannot be deleted") - } - - registryConfig, err := loadRegistryConfiguration(sys) - if err != nil { - return err - } - // docker/distribution does not document what action should be used for deleting images. - // - // Current docker/distribution requires "pull" for reading the manifest and "delete" for deleting it. - // quay.io requires "push" (an explicit "pull" is unnecessary), does not grant any token (fails parsing the request) if "delete" is included. - // OpenShift ignores the action string (both the password and the token is an OpenShift API token identifying a user). - // - // We have to hard-code a single string, luckily both docker/distribution and quay.io support "*" to mean "everything". - c, err := newDockerClientFromRef(sys, ref, registryConfig, true, "*") - if err != nil { - return err - } - defer c.Close() - - headers := map[string][]string{ - "Accept": manifest.DefaultRequestedManifestMIMETypes, - } - refTail, err := ref.tagOrDigest() - if err != nil { - return err - } - getPath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), refTail) - get, err := c.makeRequest(ctx, http.MethodGet, getPath, headers, nil, v2Auth, nil) - if err != nil { - return err - } - defer get.Body.Close() - switch get.StatusCode { - case http.StatusOK: - case http.StatusNotFound: - return fmt.Errorf("Unable to delete %v. 
Image may not exist or is not stored with a v2 Schema in a v2 registry", ref.ref) - default: - return fmt.Errorf("deleting %v: %w", ref.ref, registryHTTPResponseToError(get)) - } - manifestBody, err := iolimits.ReadAtMost(get.Body, iolimits.MaxManifestBodySize) - if err != nil { - return err - } - - manifestDigest, err := manifest.Digest(manifestBody) - if err != nil { - return fmt.Errorf("computing manifest digest: %w", err) - } - deletePath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), manifestDigest) - - // When retrieving the digest from a registry >= 2.3 use the following header: - // "Accept": "application/vnd.docker.distribution.manifest.v2+json" - delete, err := c.makeRequest(ctx, http.MethodDelete, deletePath, headers, nil, v2Auth, nil) - if err != nil { - return err - } - defer delete.Body.Close() - if delete.StatusCode != http.StatusAccepted { - return fmt.Errorf("deleting %v: %w", ref.ref, registryHTTPResponseToError(delete)) - } - - for i := 0; ; i++ { - sigURL, err := lookasideStorageURL(c.signatureBase, manifestDigest, i) - if err != nil { - return err - } - missing, err := c.deleteOneSignature(sigURL) - if err != nil { - return err - } - if missing { - break - } - } - - return nil -} - -type bufferedNetworkReaderBuffer struct { - data []byte - len int - consumed int - err error -} - -type bufferedNetworkReader struct { - stream io.ReadCloser - emptyBuffer chan *bufferedNetworkReaderBuffer - readyBuffer chan *bufferedNetworkReaderBuffer - terminate chan bool - current *bufferedNetworkReaderBuffer - mutex sync.Mutex - gotEOF bool -} - -// handleBufferedNetworkReader runs in a goroutine -func handleBufferedNetworkReader(br *bufferedNetworkReader) { - defer close(br.readyBuffer) - for { - select { - case b := <-br.emptyBuffer: - b.len, b.err = br.stream.Read(b.data) - br.readyBuffer <- b - if b.err != nil { - return - } - case <-br.terminate: - return - } - } -} - -func (n *bufferedNetworkReader) Close() error { - close(n.terminate) - close(n.emptyBuffer) - return n.stream.Close() -} - -func (n *bufferedNetworkReader) read(p []byte) (int, error) { - if n.current != nil { - copied := copy(p, n.current.data[n.current.consumed:n.current.len]) - n.current.consumed += copied - if n.current.consumed == n.current.len { - n.emptyBuffer <- n.current - n.current = nil - } - if copied > 0 { - return copied, nil - } - } - if n.gotEOF { - return 0, io.EOF - } - - var b *bufferedNetworkReaderBuffer - - select { - case b = <-n.readyBuffer: - if b.err != nil { - if b.err != io.EOF { - return b.len, b.err - } - n.gotEOF = true - } - b.consumed = 0 - n.current = b - return n.read(p) - case <-n.terminate: - return 0, io.EOF - } -} - -func (n *bufferedNetworkReader) Read(p []byte) (int, error) { - n.mutex.Lock() - defer n.mutex.Unlock() - - return n.read(p) -} - -func makeBufferedNetworkReader(stream io.ReadCloser, nBuffers, bufferSize uint) *bufferedNetworkReader { - br := bufferedNetworkReader{ - stream: stream, - emptyBuffer: make(chan *bufferedNetworkReaderBuffer, nBuffers), - readyBuffer: make(chan *bufferedNetworkReaderBuffer, nBuffers), - terminate: make(chan bool), - } - - go func() { - handleBufferedNetworkReader(&br) - }() - - for range nBuffers { - b := bufferedNetworkReaderBuffer{ - data: make([]byte, bufferSize), - } - br.emptyBuffer <- &b - } - - return &br -} - -type signalCloseReader struct { - closed chan struct{} - stream io.ReadCloser - consumeStream bool -} - -func (s signalCloseReader) Read(p []byte) (int, error) { - return s.stream.Read(p) -} - -func (s 
signalCloseReader) Close() error { - defer close(s.closed) - if s.consumeStream { - if _, err := io.Copy(io.Discard, s.stream); err != nil { - s.stream.Close() - return err - } - } - return s.stream.Close() -} diff --git a/vendor/github.com/containers/image/v5/docker/docker_transport.go b/vendor/github.com/containers/image/v5/docker/docker_transport.go deleted file mode 100644 index c10463a430..0000000000 --- a/vendor/github.com/containers/image/v5/docker/docker_transport.go +++ /dev/null @@ -1,211 +0,0 @@ -package docker - -import ( - "context" - "errors" - "fmt" - "strings" - - "github.com/containers/image/v5/docker/policyconfiguration" - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/transports" - "github.com/containers/image/v5/types" -) - -// UnknownDigestSuffix can be appended to a reference when the caller -// wants to push an image without a tag or digest. -// NewReferenceUnknownDigest() is called when this const is detected. -const UnknownDigestSuffix = "@@unknown-digest@@" - -func init() { - transports.Register(Transport) -} - -// Transport is an ImageTransport for container registry-hosted images. -var Transport = dockerTransport{} - -type dockerTransport struct{} - -func (t dockerTransport) Name() string { - return "docker" -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. -func (t dockerTransport) ParseReference(reference string) (types.ImageReference, error) { - return ParseReference(reference) -} - -// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys -// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). -// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. -// scope passed to this function will not be "", that value is always allowed. -func (t dockerTransport) ValidatePolicyConfigurationScope(scope string) error { - // FIXME? We could be verifying the various character set and length restrictions - // from docker/distribution/reference.regexp.go, but other than that there - // are few semantically invalid strings. - return nil -} - -// dockerReference is an ImageReference for Docker images. -type dockerReference struct { - ref reference.Named // By construction we know that !reference.IsNameOnly(ref) unless isUnknownDigest=true - isUnknownDigest bool -} - -// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an Docker ImageReference. -func ParseReference(refString string) (types.ImageReference, error) { - refString, ok := strings.CutPrefix(refString, "//") - if !ok { - return nil, fmt.Errorf("docker: image reference %s does not start with //", refString) - } - refString, unknownDigest := strings.CutSuffix(refString, UnknownDigestSuffix) - ref, err := reference.ParseNormalizedNamed(refString) - if err != nil { - return nil, err - } - - if unknownDigest { - if !reference.IsNameOnly(ref) { - return nil, fmt.Errorf("docker: image reference %q has unknown digest set but it contains either a tag or digest", ref.String()+UnknownDigestSuffix) - } - return NewReferenceUnknownDigest(ref) - } - - ref = reference.TagNameOnly(ref) - return NewReference(ref) -} - -// NewReference returns a Docker reference for a named reference. The reference must satisfy !reference.IsNameOnly(). 
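
// Editorial examples, not from the original file, for ParseReference above
// (the resulting fully qualified names follow the code, not external docs):
//
//	//busybox                            -> docker.io/library/busybox:latest (tag defaulted)
//	//quay.io/ns/repo:v1                 -> quay.io/ns/repo:v1
//	//quay.io/ns/repo@@unknown-digest@@  -> name-only reference, for digest-less pushes
func parseExample() {
	ref, err := ParseReference("//busybox")
	if err != nil {
		panic(err)
	}
	fmt.Println(ref.DockerReference().String()) // docker.io/library/busybox:latest
}
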
-func NewReference(ref reference.Named) (types.ImageReference, error) { - return newReference(ref, false) -} - -// NewReferenceUnknownDigest returns a Docker reference for a named reference, which can be used to write images without setting -// a tag on the registry. The reference must satisfy reference.IsNameOnly() -func NewReferenceUnknownDigest(ref reference.Named) (types.ImageReference, error) { - return newReference(ref, true) -} - -// newReference returns a dockerReference for a named reference. -func newReference(ref reference.Named, unknownDigest bool) (dockerReference, error) { - if reference.IsNameOnly(ref) && !unknownDigest { - return dockerReference{}, fmt.Errorf("Docker reference %s is not for an unknown digest case; tag or digest is needed", reference.FamiliarString(ref)) - } - if !reference.IsNameOnly(ref) && unknownDigest { - return dockerReference{}, fmt.Errorf("Docker reference %s is for an unknown digest case but reference has a tag or digest", reference.FamiliarString(ref)) - } - // A github.com/distribution/reference value can have a tag and a digest at the same time! - // The docker/distribution API does not really support that (we can’t ask for an image with a specific - // tag and digest), so fail. This MAY be accepted in the future. - // (Even if it were supported, the semantics of policy namespaces are unclear - should we drop - // the tag or the digest first?) - _, isTagged := ref.(reference.NamedTagged) - _, isDigested := ref.(reference.Canonical) - if isTagged && isDigested { - return dockerReference{}, errors.New("Docker references with both a tag and digest are currently not supported") - } - - return dockerReference{ - ref: ref, - isUnknownDigest: unknownDigest, - }, nil -} - -func (ref dockerReference) Transport() types.ImageTransport { - return Transport -} - -// StringWithinTransport returns a string representation of the reference, which MUST be such that -// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. -// NOTE: The returned string is not promised to be equal to the original input to ParseReference; -// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa. -// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix. -func (ref dockerReference) StringWithinTransport() string { - famString := "//" + reference.FamiliarString(ref.ref) - if ref.isUnknownDigest { - return famString + UnknownDigestSuffix - } - return famString -} - -// DockerReference returns a Docker reference associated with this reference -// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, -// not e.g. after redirect or alias processing), or nil if unknown/not applicable. -func (ref dockerReference) DockerReference() reference.Named { - return ref.ref -} - -// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. -// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; -// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical -// (i.e. various references with exactly the same semantics should return the same configuration identity) -// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but -// not required/guaranteed that it will be a valid input to Transport().ParseReference(). 
-// Returns "" if configuration identities for these references are not supported. -func (ref dockerReference) PolicyConfigurationIdentity() string { - if ref.isUnknownDigest { - return ref.ref.Name() - } - res, err := policyconfiguration.DockerReferenceIdentity(ref.ref) - if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure. - panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err)) - } - return res -} - -// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search -// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed -// in order, terminating on first match, and an implicit "" is always checked at the end. -// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(), -// and each following element to be a prefix of the element preceding it. -func (ref dockerReference) PolicyConfigurationNamespaces() []string { - namespaces := policyconfiguration.DockerReferenceNamespaces(ref.ref) - if ref.isUnknownDigest { - if len(namespaces) != 0 && namespaces[0] == ref.ref.Name() { - namespaces = namespaces[1:] - } - } - return namespaces -} - -// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned ImageCloser. -// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, -// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. -func (ref dockerReference) NewImage(ctx context.Context, sys *types.SystemContext) (types.ImageCloser, error) { - return newImage(ctx, sys, ref) -} - -// NewImageSource returns a types.ImageSource for this reference. -// The caller must call .Close() on the returned ImageSource. -func (ref dockerReference) NewImageSource(ctx context.Context, sys *types.SystemContext) (types.ImageSource, error) { - return newImageSource(ctx, sys, ref) -} - -// NewImageDestination returns a types.ImageDestination for this reference. -// The caller must call .Close() on the returned ImageDestination. -func (ref dockerReference) NewImageDestination(ctx context.Context, sys *types.SystemContext) (types.ImageDestination, error) { - return newImageDestination(sys, ref) -} - -// DeleteImage deletes the named image from the registry, if supported. -func (ref dockerReference) DeleteImage(ctx context.Context, sys *types.SystemContext) error { - return deleteImage(ctx, sys, ref) -} - -// tagOrDigest returns a tag or digest from the reference. -func (ref dockerReference) tagOrDigest() (string, error) { - if ref, ok := ref.ref.(reference.Canonical); ok { - return ref.Digest().String(), nil - } - if ref, ok := ref.ref.(reference.NamedTagged); ok { - return ref.Tag(), nil - } - - if ref.isUnknownDigest { - return "", fmt.Errorf("Docker reference %q is for an unknown digest case, has neither a digest nor a tag", reference.FamiliarString(ref.ref)) - } - // This should not happen, NewReference above refuses reference.IsNameOnly values. 
- return "", fmt.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", reference.FamiliarString(ref.ref)) -} diff --git a/vendor/github.com/containers/image/v5/docker/errors.go b/vendor/github.com/containers/image/v5/docker/errors.go deleted file mode 100644 index 1ed40b87f7..0000000000 --- a/vendor/github.com/containers/image/v5/docker/errors.go +++ /dev/null @@ -1,102 +0,0 @@ -package docker - -import ( - "errors" - "fmt" - "net/http" - - "github.com/docker/distribution/registry/api/errcode" - "github.com/sirupsen/logrus" -) - -var ( - // ErrV1NotSupported is returned when we're trying to talk to a - // docker V1 registry. - // Deprecated: The V1 container registry detection is no longer performed, so this error is never returned. - ErrV1NotSupported = errors.New("can't talk to a V1 container registry") - // ErrTooManyRequests is returned when the status code returned is 429 - ErrTooManyRequests = errors.New("too many requests to registry") -) - -// ErrUnauthorizedForCredentials is returned when the status code returned is 401 -type ErrUnauthorizedForCredentials struct { // We only use a struct to allow a type assertion, without limiting the contents of the error otherwise. - Err error -} - -func (e ErrUnauthorizedForCredentials) Error() string { - return fmt.Sprintf("unable to retrieve auth token: invalid username/password: %s", e.Err.Error()) -} - -// httpResponseToError translates the https.Response into an error, possibly prefixing it with the supplied context. It returns -// nil if the response is not considered an error. -// NOTE: Almost all callers in this package should use registryHTTPResponseToError instead. -func httpResponseToError(res *http.Response, context string) error { - switch res.StatusCode { - case http.StatusOK: - return nil - case http.StatusTooManyRequests: - return ErrTooManyRequests - case http.StatusUnauthorized: - err := registryHTTPResponseToError(res) - return ErrUnauthorizedForCredentials{Err: err} - default: - if context == "" { - return newUnexpectedHTTPStatusError(res) - } - return fmt.Errorf("%s: %w", context, newUnexpectedHTTPStatusError(res)) - } -} - -// registryHTTPResponseToError creates a Go error from an HTTP error response of a docker/distribution -// registry. -// -// WARNING: The OCI distribution spec says -// “A `4XX` response code from the registry MAY return a body in any format.”; but if it is -// JSON, it MUST use the errcode.Error structure. -// So, callers should primarily decide based on HTTP StatusCode, not based on error type here. -func registryHTTPResponseToError(res *http.Response) error { - err := handleErrorResponse(res) - // len(errs) == 0 should never be returned by handleErrorResponse; if it does, we don't modify it and let the caller report it as is. - if errs, ok := err.(errcode.Errors); ok && len(errs) > 0 { - // The docker/distribution registry implementation almost never returns - // more than one error in the HTTP body; it seems there is only one - // possible instance, where the second error reports a cleanup failure - // we don't really care about. - // - // The only _common_ case where a multi-element error is returned is - // created by the handleErrorResponse parser when OAuth authorization fails: - // the first element contains errors from a WWW-Authenticate header, the second - // element contains errors from the response body. 
- // - // In that case the first one is currently _slightly_ more informative (ErrorCodeUnauthorized - // for invalid tokens, ErrorCodeDenied for permission denied with a valid token - // for the first error, vs. ErrorCodeUnauthorized for both cases for the second error.) - // - // Also, docker/docker similarly only logs the other errors and returns the - // first one. - if len(errs) > 1 { - logrus.Debugf("Discarding non-primary errors:") - for _, err := range errs[1:] { - logrus.Debugf(" %s", err.Error()) - } - } - err = errs[0] - } - switch e := err.(type) { - case *unexpectedHTTPResponseError: - response := string(e.Response) - if len(response) > 50 { - response = response[:50] + "..." - } - // %.0w makes e visible to error.Unwrap() without including any text - err = fmt.Errorf("StatusCode: %d, %q%.0w", e.StatusCode, response, e) - case errcode.Error: - // e.Error() is fmt.Sprintf("%s: %s", e.Code.Error(), e.Message, which is usually - // rather redundant. So reword it without using e.Code.Error() if e.Message is the default. - if e.Message == e.Code.Message() { - // %.0w makes e visible to error.Unwrap() without including any text - err = fmt.Errorf("%s%.0w", e.Message, e) - } - } - return err -} diff --git a/vendor/github.com/containers/image/v5/docker/paths_common.go b/vendor/github.com/containers/image/v5/docker/paths_common.go deleted file mode 100644 index d9993630bc..0000000000 --- a/vendor/github.com/containers/image/v5/docker/paths_common.go +++ /dev/null @@ -1,5 +0,0 @@ -//go:build !freebsd - -package docker - -const etcDir = "/etc" diff --git a/vendor/github.com/containers/image/v5/docker/paths_freebsd.go b/vendor/github.com/containers/image/v5/docker/paths_freebsd.go deleted file mode 100644 index 8f0f2eee88..0000000000 --- a/vendor/github.com/containers/image/v5/docker/paths_freebsd.go +++ /dev/null @@ -1,5 +0,0 @@ -//go:build freebsd - -package docker - -const etcDir = "/usr/local/etc" diff --git a/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go b/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go deleted file mode 100644 index e1f1f1f2b7..0000000000 --- a/vendor/github.com/containers/image/v5/docker/policyconfiguration/naming.go +++ /dev/null @@ -1,78 +0,0 @@ -package policyconfiguration - -import ( - "errors" - "fmt" - "strings" - - "github.com/containers/image/v5/docker/reference" -) - -// DockerReferenceIdentity returns a string representation of the reference, suitable for policy lookup, -// as a backend for ImageReference.PolicyConfigurationIdentity. -// The reference must satisfy !reference.IsNameOnly(). -func DockerReferenceIdentity(ref reference.Named) (string, error) { - res := ref.Name() - tagged, isTagged := ref.(reference.NamedTagged) - digested, isDigested := ref.(reference.Canonical) - switch { - case isTagged && isDigested: // Note that this CAN actually happen. - return "", fmt.Errorf("Unexpected Docker reference %s with both a name and a digest", reference.FamiliarString(ref)) - case !isTagged && !isDigested: // This should not happen, the caller is expected to ensure !reference.IsNameOnly() - return "", fmt.Errorf("Internal inconsistency: Docker reference %s with neither a tag nor a digest", reference.FamiliarString(ref)) - case isTagged: - res = res + ":" + tagged.Tag() - case isDigested: - res = res + "@" + digested.Digest().String() - default: // Coverage: The above was supposed to be exhaustive. 
- return "", errors.New("Internal inconsistency, unexpected default branch") - } - return res, nil -} - -// DockerReferenceNamespaces returns a list of other policy configuration namespaces to search, -// as a backend for ImageReference.PolicyConfigurationIdentity. -// The reference must satisfy !reference.IsNameOnly(). -func DockerReferenceNamespaces(ref reference.Named) []string { - // Look for a match of the repository, and then of the possible parent - // namespaces. Note that this only happens on the expanded host names - // and repository names, i.e. "busybox" is looked up as "docker.io/library/busybox", - // then in its parent "docker.io/library"; in none of "busybox", - // un-namespaced "library" nor in "" supposedly implicitly representing "library/". - // - // ref.Name() == ref.Domain() + "/" + ref.Path(), so the last - // iteration matches the host name (for any namespace). - res := []string{} - name := ref.Name() - for { - res = append(res, name) - - lastSlash := strings.LastIndex(name, "/") - if lastSlash == -1 { - break - } - name = name[:lastSlash] - } - - // Strip port number if any, before appending to res slice. - // Currently, the most compatible behavior is to return - // example.com:8443/ns, example.com:8443, *.com. - // If a port number is not specified, the expected behavior would be - // example.com/ns, example.com, *.com - portNumColon := strings.Index(name, ":") - if portNumColon != -1 { - name = name[:portNumColon] - } - - // Append wildcarded domains to res slice - for { - firstDot := strings.Index(name, ".") - if firstDot == -1 { - break - } - name = name[firstDot+1:] - - res = append(res, "*."+name) - } - return res -} diff --git a/vendor/github.com/containers/image/v5/docker/reference/README.md b/vendor/github.com/containers/image/v5/docker/reference/README.md deleted file mode 100644 index 3c4d74eb4d..0000000000 --- a/vendor/github.com/containers/image/v5/docker/reference/README.md +++ /dev/null @@ -1,2 +0,0 @@ -This is a copy of github.com/docker/distribution/reference as of commit 3226863cbcba6dbc2f6c83a37b28126c934af3f8, -except that ParseAnyReferenceWithSet has been removed to drop the dependency on github.com/docker/distribution/digestset. \ No newline at end of file diff --git a/vendor/github.com/containers/image/v5/docker/reference/helpers.go b/vendor/github.com/containers/image/v5/docker/reference/helpers.go deleted file mode 100644 index 978df7eabb..0000000000 --- a/vendor/github.com/containers/image/v5/docker/reference/helpers.go +++ /dev/null @@ -1,42 +0,0 @@ -package reference - -import "path" - -// IsNameOnly returns true if reference only contains a repo name. -func IsNameOnly(ref Named) bool { - if _, ok := ref.(NamedTagged); ok { - return false - } - if _, ok := ref.(Canonical); ok { - return false - } - return true -} - -// FamiliarName returns the familiar name string -// for the given named, familiarizing if needed. -func FamiliarName(ref Named) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().Name() - } - return ref.Name() -} - -// FamiliarString returns the familiar string representation -// for the given reference, familiarizing if needed. -func FamiliarString(ref Reference) string { - if nn, ok := ref.(normalizedNamed); ok { - return nn.Familiar().String() - } - return ref.String() -} - -// FamiliarMatch reports whether ref matches the specified pattern. -// See https://godoc.org/path#Match for supported patterns. 
-func FamiliarMatch(pattern string, ref Reference) (bool, error) { - matched, err := path.Match(pattern, FamiliarString(ref)) - if namedRef, isNamed := ref.(Named); isNamed && !matched { - matched, _ = path.Match(pattern, FamiliarName(namedRef)) - } - return matched, err -} diff --git a/vendor/github.com/containers/image/v5/docker/reference/normalize.go b/vendor/github.com/containers/image/v5/docker/reference/normalize.go deleted file mode 100644 index d3f47d210f..0000000000 --- a/vendor/github.com/containers/image/v5/docker/reference/normalize.go +++ /dev/null @@ -1,181 +0,0 @@ -package reference - -import ( - "errors" - "fmt" - "strings" - - "github.com/opencontainers/go-digest" -) - -var ( - legacyDefaultDomain = "index.docker.io" - defaultDomain = "docker.io" - officialRepoName = "library" - defaultTag = "latest" -) - -// normalizedNamed represents a name which has been -// normalized and has a familiar form. A familiar name -// is what is used in Docker UI. An example normalized -// name is "docker.io/library/ubuntu" and corresponding -// familiar name of "ubuntu". -type normalizedNamed interface { - Named - Familiar() Named -} - -// ParseNormalizedNamed parses a string into a named reference -// transforming a familiar name from Docker UI to a fully -// qualified reference. If the value may be an identifier -// use ParseAnyReference. -func ParseNormalizedNamed(s string) (Named, error) { - if ok := anchoredIdentifierRegexp.MatchString(s); ok { - return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s) - } - domain, remainder := splitDockerDomain(s) - var remoteName string - if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 { - remoteName = remainder[:tagSep] - } else { - remoteName = remainder - } - if strings.ToLower(remoteName) != remoteName { - return nil, errors.New("invalid reference format: repository name must be lowercase") - } - - ref, err := Parse(domain + "/" + remainder) - if err != nil { - return nil, err - } - named, isNamed := ref.(Named) - if !isNamed { - return nil, fmt.Errorf("reference %s has no name", ref.String()) - } - return named, nil -} - -// ParseDockerRef normalizes the image reference following the docker convention. This is added -// mainly for backward compatibility. -// The reference returned can only be either tagged or digested. For reference contains both tag -// and digest, the function returns digested reference, e.g. docker.io/library/busybox:latest@ -// sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa will be returned as -// docker.io/library/busybox@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa. -func ParseDockerRef(ref string) (Named, error) { - named, err := ParseNormalizedNamed(ref) - if err != nil { - return nil, err - } - if _, ok := named.(NamedTagged); ok { - if canonical, ok := named.(Canonical); ok { - // The reference is both tagged and digested, only - // return digested. - newNamed, err := WithName(canonical.Name()) - if err != nil { - return nil, err - } - newCanonical, err := WithDigest(newNamed, canonical.Digest()) - if err != nil { - return nil, err - } - return newCanonical, nil - } - } - return TagNameOnly(named), nil -} - -// splitDockerDomain splits a repository name to domain and remotename string. -// If no valid domain is found, the default domain is used. Repository name -// needs to be already validated before. 
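
// Editorial examples, not from the original file, of the normalization implemented
// here, reusing the busybox digest quoted in the ParseDockerRef comment above:
func normalizeExamples() {
	n, _ := ParseNormalizedNamed("ubuntu")
	fmt.Println(n.String()) // docker.io/library/ubuntu
	d, _ := ParseDockerRef("busybox:latest@sha256:7cc4b5aefd1d0cadf8d97d4350462ba51c694ebca145b08d7d41b41acc8db5aa")
	fmt.Println(d.String()) // tag dropped, digest kept: docker.io/library/busybox@sha256:7cc4…
}
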
-func splitDockerDomain(name string) (domain, remainder string) { - i := strings.IndexRune(name, '/') - if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { - domain, remainder = defaultDomain, name - } else { - domain, remainder = name[:i], name[i+1:] - } - if domain == legacyDefaultDomain { - domain = defaultDomain - } - if domain == defaultDomain && !strings.ContainsRune(remainder, '/') { - remainder = officialRepoName + "/" + remainder - } - return -} - -// familiarizeName returns a shortened version of the name familiar -// to the Docker UI. Familiar names have the default domain -// "docker.io" and "library/" repository prefix removed. -// For example, "docker.io/library/redis" will have the familiar -// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp". -// Returns a familiarized named only reference. -func familiarizeName(named namedRepository) repository { - repo := repository{ - domain: named.Domain(), - path: named.Path(), - } - - if repo.domain == defaultDomain { - repo.domain = "" - // Handle official repositories which have the pattern "library/" - if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName { - repo.path = split[1] - } - } - return repo -} - -func (r reference) Familiar() Named { - return reference{ - namedRepository: familiarizeName(r.namedRepository), - tag: r.tag, - digest: r.digest, - } -} - -func (r repository) Familiar() Named { - return familiarizeName(r) -} - -func (t taggedReference) Familiar() Named { - return taggedReference{ - namedRepository: familiarizeName(t.namedRepository), - tag: t.tag, - } -} - -func (c canonicalReference) Familiar() Named { - return canonicalReference{ - namedRepository: familiarizeName(c.namedRepository), - digest: c.digest, - } -} - -// TagNameOnly adds the default tag "latest" to a reference if it only has -// a repo name. -func TagNameOnly(ref Named) Named { - if IsNameOnly(ref) { - namedTagged, err := WithTag(ref, defaultTag) - if err != nil { - // Default tag must be valid, to create a NamedTagged - // type with non-validated input the WithTag function - // should be used instead - panic(err) - } - return namedTagged - } - return ref -} - -// ParseAnyReference parses a reference string as a possible identifier, -// full digest, or familiar name. -func ParseAnyReference(ref string) (Reference, error) { - if ok := anchoredIdentifierRegexp.MatchString(ref); ok { - return digestReference("sha256:" + ref), nil - } - if dgst, err := digest.Parse(ref); err == nil { - return digestReference(dgst), nil - } - - return ParseNormalizedNamed(ref) -} diff --git a/vendor/github.com/containers/image/v5/docker/reference/reference.go b/vendor/github.com/containers/image/v5/docker/reference/reference.go deleted file mode 100644 index 6c5484c068..0000000000 --- a/vendor/github.com/containers/image/v5/docker/reference/reference.go +++ /dev/null @@ -1,433 +0,0 @@ -// Package reference provides a general type to represent any way of referencing images within the registry. -// Its main purpose is to abstract tags and digests (content-addressable hash). -// -// Grammar -// -// reference := name [ ":" tag ] [ "@" digest ] -// name := [domain '/'] path-component ['/' path-component]* -// domain := domain-component ['.' 
domain-component]* [':' port-number] -// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ -// port-number := /[0-9]+/ -// path-component := alphanumeric [separator alphanumeric]* -// alphanumeric := /[a-z0-9]+/ -// separator := /[_.]|__|[-]*/ -// -// tag := /[\w][\w.-]{0,127}/ -// -// digest := digest-algorithm ":" digest-hex -// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* -// digest-algorithm-separator := /[+.-_]/ -// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ -// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value -// -// identifier := /[a-f0-9]{64}/ -// short-identifier := /[a-f0-9]{6,64}/ -package reference - -import ( - "errors" - "fmt" - "strings" - - "github.com/opencontainers/go-digest" -) - -const ( - // NameTotalLengthMax is the maximum total number of characters in a repository name. - NameTotalLengthMax = 255 -) - -var ( - // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. - ErrReferenceInvalidFormat = errors.New("invalid reference format") - - // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. - ErrTagInvalidFormat = errors.New("invalid tag format") - - // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag. - ErrDigestInvalidFormat = errors.New("invalid digest format") - - // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters. - ErrNameContainsUppercase = errors.New("repository name must be lowercase") - - // ErrNameEmpty is returned for empty, invalid repository names. - ErrNameEmpty = errors.New("repository name must have at least one component") - - // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. - ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) - - // ErrNameNotCanonical is returned when a name is not canonical. - ErrNameNotCanonical = errors.New("repository name must be canonical") -) - -// Reference is an opaque object reference identifier that may include -// modifiers such as a hostname, name, tag, and digest. -type Reference interface { - // String returns the full reference - String() string -} - -// Field provides a wrapper type for resolving correct reference types when -// working with encoding. -type Field struct { - reference Reference -} - -// AsField wraps a reference in a Field for encoding. -func AsField(reference Reference) Field { - return Field{reference} -} - -// Reference unwraps the reference type from the field to -// return the Reference object. This object should be -// of the appropriate type to further check for different -// reference types. -func (f Field) Reference() Reference { - return f.reference -} - -// MarshalText serializes the field to byte text which -// is the string of the reference. -func (f Field) MarshalText() (p []byte, err error) { - return []byte(f.reference.String()), nil -} - -// UnmarshalText parses text bytes by invoking the -// reference parser to ensure the appropriately -// typed reference object is wrapped by field. 
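// A short sketch of Field's text round-trip, using only APIs defined in this
// file (values illustrative):
package main

import (
	"fmt"

	"go.podman.io/image/v5/docker/reference"
)

func main() {
	ref, _ := reference.Parse("docker.io/library/redis:7")
	f := reference.AsField(ref)
	text, _ := f.MarshalText() // the String() form of the wrapped reference

	var back reference.Field
	if err := back.UnmarshalText(text); err != nil {
		panic(err)
	}
	fmt.Println(back.Reference().String()) // docker.io/library/redis:7
}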
-func (f *Field) UnmarshalText(p []byte) error { - r, err := Parse(string(p)) - if err != nil { - return err - } - - f.reference = r - return nil -} - -// Named is an object with a full name -type Named interface { - Reference - Name() string -} - -// Tagged is an object which has a tag -type Tagged interface { - Reference - Tag() string -} - -// NamedTagged is an object including a name and tag. -type NamedTagged interface { - Named - Tag() string -} - -// Digested is an object which has a digest -// in which it can be referenced by -type Digested interface { - Reference - Digest() digest.Digest -} - -// Canonical reference is an object with a fully unique -// name including a name with domain and digest -type Canonical interface { - Named - Digest() digest.Digest -} - -// namedRepository is a reference to a repository with a name. -// A namedRepository has both domain and path components. -type namedRepository interface { - Named - Domain() string - Path() string -} - -// Domain returns the domain part of the Named reference -func Domain(named Named) string { - if r, ok := named.(namedRepository); ok { - return r.Domain() - } - domain, _ := splitDomain(named.Name()) - return domain -} - -// Path returns the name without the domain part of the Named reference -func Path(named Named) (name string) { - if r, ok := named.(namedRepository); ok { - return r.Path() - } - _, path := splitDomain(named.Name()) - return path -} - -func splitDomain(name string) (string, string) { - match := anchoredNameRegexp.FindStringSubmatch(name) - if len(match) != 3 { - return "", name - } - return match[1], match[2] -} - -// SplitHostname splits a named reference into a -// hostname and name string. If no valid hostname is -// found, the hostname is empty and the full value -// is returned as name -// Deprecated: Use Domain or Path -func SplitHostname(named Named) (string, string) { - if r, ok := named.(namedRepository); ok { - return r.Domain(), r.Path() - } - return splitDomain(named.Name()) -} - -// Parse parses s and returns a syntactically valid Reference. -// If an error was encountered it is returned, along with a nil Reference. -// NOTE: Parse will not handle short digests. -func Parse(s string) (Reference, error) { - matches := ReferenceRegexp.FindStringSubmatch(s) - if matches == nil { - if s == "" { - return nil, ErrNameEmpty - } - if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { - return nil, ErrNameContainsUppercase - } - return nil, ErrReferenceInvalidFormat - } - - if len(matches[1]) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - - var repo repository - - nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) - if len(nameMatch) == 3 { - repo.domain = nameMatch[1] - repo.path = nameMatch[2] - } else { - repo.domain = "" - repo.path = matches[1] - } - - ref := reference{ - namedRepository: repo, - tag: matches[2], - } - if matches[3] != "" { - var err error - ref.digest, err = digest.Parse(matches[3]) - if err != nil { - return nil, err - } - } - - r := getBestReferenceType(ref) - if r == nil { - return nil, ErrNameEmpty - } - - return r, nil -} - -// ParseNamed parses s and returns a syntactically valid reference implementing -// the Named interface. The reference must have a name and be in the canonical -// form, otherwise an error is returned. -// If an error was encountered it is returned, along with a nil Reference. -// NOTE: ParseNamed will not handle short digests. 
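// A sketch of ParseNamed's canonical-form requirement (illustrative values):
package main

import (
	"fmt"

	"go.podman.io/image/v5/docker/reference"
)

func main() {
	// Already canonical: parses fine.
	if n, err := reference.ParseNamed("docker.io/library/ubuntu"); err == nil {
		fmt.Println(n.Name()) // docker.io/library/ubuntu
	}
	// A familiar shorthand normalizes to a different string, so ParseNamed
	// rejects it with ErrNameNotCanonical.
	_, err := reference.ParseNamed("ubuntu")
	fmt.Println(err) // repository name must be canonical
}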
-func ParseNamed(s string) (Named, error) { - named, err := ParseNormalizedNamed(s) - if err != nil { - return nil, err - } - if named.String() != s { - return nil, ErrNameNotCanonical - } - return named, nil -} - -// WithName returns a named object representing the given string. If the input -// is invalid ErrReferenceInvalidFormat will be returned. -func WithName(name string) (Named, error) { - if len(name) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - - match := anchoredNameRegexp.FindStringSubmatch(name) - if match == nil || len(match) != 3 { - return nil, ErrReferenceInvalidFormat - } - return repository{ - domain: match[1], - path: match[2], - }, nil -} - -// WithTag combines the name from "name" and the tag from "tag" to form a -// reference incorporating both the name and the tag. -func WithTag(name Named, tag string) (NamedTagged, error) { - if !anchoredTagRegexp.MatchString(tag) { - return nil, ErrTagInvalidFormat - } - var repo repository - if r, ok := name.(namedRepository); ok { - repo.domain = r.Domain() - repo.path = r.Path() - } else { - repo.path = name.Name() - } - if canonical, ok := name.(Canonical); ok { - return reference{ - namedRepository: repo, - tag: tag, - digest: canonical.Digest(), - }, nil - } - return taggedReference{ - namedRepository: repo, - tag: tag, - }, nil -} - -// WithDigest combines the name from "name" and the digest from "digest" to form -// a reference incorporating both the name and the digest. -func WithDigest(name Named, digest digest.Digest) (Canonical, error) { - if !anchoredDigestRegexp.MatchString(digest.String()) { - return nil, ErrDigestInvalidFormat - } - var repo repository - if r, ok := name.(namedRepository); ok { - repo.domain = r.Domain() - repo.path = r.Path() - } else { - repo.path = name.Name() - } - if tagged, ok := name.(Tagged); ok { - return reference{ - namedRepository: repo, - tag: tagged.Tag(), - digest: digest, - }, nil - } - return canonicalReference{ - namedRepository: repo, - digest: digest, - }, nil -} - -// TrimNamed removes any tag or digest from the named reference. 
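// A sketch composing WithName/WithTag/WithDigest and trimming back with
// TrimNamed (the repository and digest values are illustrative):
package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"

	"go.podman.io/image/v5/docker/reference"
)

func main() {
	name, _ := reference.WithName("registry.example.com/team/app")
	tagged, _ := reference.WithTag(name, "v1")
	dgst := digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
	canonical, _ := reference.WithDigest(tagged, dgst)
	// WithDigest preserves an existing tag, so this prints the name:tag@digest form.
	fmt.Println(canonical.String())
	// TrimNamed drops both tag and digest again.
	fmt.Println(reference.TrimNamed(canonical).String()) // registry.example.com/team/app
}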
-func TrimNamed(ref Named) Named { - domain, path := SplitHostname(ref) - return repository{ - domain: domain, - path: path, - } -} - -func getBestReferenceType(ref reference) Reference { - if ref.Name() == "" { - // Allow digest only references - if ref.digest != "" { - return digestReference(ref.digest) - } - return nil - } - if ref.tag == "" { - if ref.digest != "" { - return canonicalReference{ - namedRepository: ref.namedRepository, - digest: ref.digest, - } - } - return ref.namedRepository - } - if ref.digest == "" { - return taggedReference{ - namedRepository: ref.namedRepository, - tag: ref.tag, - } - } - - return ref -} - -type reference struct { - namedRepository - tag string - digest digest.Digest -} - -func (r reference) String() string { - return r.Name() + ":" + r.tag + "@" + r.digest.String() -} - -func (r reference) Tag() string { - return r.tag -} - -func (r reference) Digest() digest.Digest { - return r.digest -} - -type repository struct { - domain string - path string -} - -func (r repository) String() string { - return r.Name() -} - -func (r repository) Name() string { - if r.domain == "" { - return r.path - } - return r.domain + "/" + r.path -} - -func (r repository) Domain() string { - return r.domain -} - -func (r repository) Path() string { - return r.path -} - -type digestReference digest.Digest - -func (d digestReference) String() string { - return digest.Digest(d).String() -} - -func (d digestReference) Digest() digest.Digest { - return digest.Digest(d) -} - -type taggedReference struct { - namedRepository - tag string -} - -func (t taggedReference) String() string { - return t.Name() + ":" + t.tag -} - -func (t taggedReference) Tag() string { - return t.tag -} - -type canonicalReference struct { - namedRepository - digest digest.Digest -} - -func (c canonicalReference) String() string { - return c.Name() + "@" + c.digest.String() -} - -func (c canonicalReference) Digest() digest.Digest { - return c.digest -} diff --git a/vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go b/vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go deleted file mode 100644 index 7b15871f7b..0000000000 --- a/vendor/github.com/containers/image/v5/docker/reference/regexp-additions.go +++ /dev/null @@ -1,6 +0,0 @@ -package reference - -// Return true if the specified string fully matches `IdentifierRegexp`. -func IsFullIdentifier(s string) bool { - return anchoredIdentifierRegexp.MatchString(s) -} diff --git a/vendor/github.com/containers/image/v5/docker/reference/regexp.go b/vendor/github.com/containers/image/v5/docker/reference/regexp.go deleted file mode 100644 index 76ba5c2d5c..0000000000 --- a/vendor/github.com/containers/image/v5/docker/reference/regexp.go +++ /dev/null @@ -1,156 +0,0 @@ -package reference - -import ( - "regexp" - "strings" - - storageRegexp "github.com/containers/storage/pkg/regexp" -) - -const ( - // alphaNumeric defines the alpha numeric atom, typically a - // component of names. This only allows lower case characters and digits. - alphaNumeric = `[a-z0-9]+` - - // separator defines the separators allowed to be embedded in name - // components. This allow one period, one or two underscore and multiple - // dashes. Repeated dashes and underscores are intentionally treated - // differently. In order to support valid hostnames as name components, - // supporting repeated dash was added. Additionally double underscore is - // now allowed as a separator to loosen the restriction for previously - // supported names. 
- separator = `(?:[._]|__|[-]*)` - - // repository name to start with a component as defined by DomainRegexp - // and followed by an optional port. - domainComponent = `(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])` - - // The string counterpart for TagRegexp. - tag = `[\w][\w.-]{0,127}` - - // The string counterpart for DigestRegexp. - digestPat = `[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}` - - // The string counterpart for IdentifierRegexp. - identifier = `([a-f0-9]{64})` - - // The string counterpart for ShortIdentifierRegexp. - shortIdentifier = `([a-f0-9]{6,64})` -) - -var ( - // nameComponent restricts registry path component names to start - // with at least one letter or number, with following parts able to be - // separated by one period, one or two underscore and multiple dashes. - nameComponent = expression( - alphaNumeric, - optional(repeated(separator, alphaNumeric))) - - domain = expression( - domainComponent, - optional(repeated(literal(`.`), domainComponent)), - optional(literal(`:`), `[0-9]+`)) - // DomainRegexp defines the structure of potential domain components - // that may be part of image names. This is purposely a subset of what is - // allowed by DNS to ensure backwards compatibility with Docker image - // names. - DomainRegexp = re(domain) - - // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. - TagRegexp = re(tag) - - anchoredTag = anchored(tag) - // anchoredTagRegexp matches valid tag names, anchored at the start and - // end of the matched string. - anchoredTagRegexp = storageRegexp.Delayed(anchoredTag) - - // DigestRegexp matches valid digests. - DigestRegexp = re(digestPat) - - anchoredDigest = anchored(digestPat) - // anchoredDigestRegexp matches valid digests, anchored at the start and - // end of the matched string. - anchoredDigestRegexp = storageRegexp.Delayed(anchoredDigest) - - namePat = expression( - optional(domain, literal(`/`)), - nameComponent, - optional(repeated(literal(`/`), nameComponent))) - // NameRegexp is the format for the name component of references. The - // regexp has capturing groups for the domain and name part omitting - // the separating forward slash from either. - NameRegexp = re(namePat) - - anchoredName = anchored( - optional(capture(domain), literal(`/`)), - capture(nameComponent, - optional(repeated(literal(`/`), nameComponent)))) - // anchoredNameRegexp is used to parse a name value, capturing the - // domain and trailing components. - anchoredNameRegexp = storageRegexp.Delayed(anchoredName) - - referencePat = anchored(capture(namePat), - optional(literal(":"), capture(tag)), - optional(literal("@"), capture(digestPat))) - // ReferenceRegexp is the full supported format of a reference. The regexp - // is anchored and has capturing groups for name, tag, and digest - // components. - ReferenceRegexp = re(referencePat) - - // IdentifierRegexp is the format for string identifier used as a - // content addressable identifier using sha256. These identifiers - // are like digests without the algorithm, since sha256 is used. - IdentifierRegexp = re(identifier) - - // ShortIdentifierRegexp is the format used to represent a prefix - // of an identifier. A prefix may be used to match a sha256 identifier - // within a list of trusted identifiers. - ShortIdentifierRegexp = re(shortIdentifier) - - anchoredIdentifier = anchored(identifier) - // anchoredIdentifierRegexp is used to check or match an - // identifier value, anchored at start and end of string. 
- anchoredIdentifierRegexp = storageRegexp.Delayed(anchoredIdentifier) -) - -// re compiles the string to a regular expression. -var re = regexp.MustCompile - -// literal compiles s into a literal regular expression, escaping any regexp -// reserved characters. -func literal(s string) string { - return regexp.QuoteMeta(s) -} - -// expression defines a full expression, where each regular expression must -// follow the previous. -func expression(res ...string) string { - return strings.Join(res, "") -} - -// optional wraps the expression in a non-capturing group and makes the -// production optional. -func optional(res ...string) string { - return group(expression(res...)) + `?` -} - -// repeated wraps the regexp in a non-capturing group to get one or more -// matches. -func repeated(res ...string) string { - return group(expression(res...)) + `+` -} - -// group wraps the regexp in a non-capturing group. -func group(res ...string) string { - return `(?:` + expression(res...) + `)` -} - -// capture wraps the expression in a capturing group. -func capture(res ...string) string { - return `(` + expression(res...) + `)` -} - -// anchored anchors the regular expression by adding start and end delimiters. -func anchored(res ...string) string { - return `^` + expression(res...) + `$` -} diff --git a/vendor/github.com/containers/image/v5/docker/registries_d.go b/vendor/github.com/containers/image/v5/docker/registries_d.go deleted file mode 100644 index 89d48cc4fe..0000000000 --- a/vendor/github.com/containers/image/v5/docker/registries_d.go +++ /dev/null @@ -1,303 +0,0 @@ -package docker - -import ( - "errors" - "fmt" - "io/fs" - "net/url" - "os" - "path" - "path/filepath" - "strings" - - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/internal/rootless" - "github.com/containers/image/v5/types" - "github.com/containers/storage/pkg/fileutils" - "github.com/containers/storage/pkg/homedir" - "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" - "gopkg.in/yaml.v3" -) - -// systemRegistriesDirPath is the path to registries.d, used for locating lookaside Docker signature storage. -// You can override this at build time with -// -ldflags '-X github.com/containers/image/v5/docker.systemRegistriesDirPath=$your_path' -var systemRegistriesDirPath = builtinRegistriesDirPath - -// builtinRegistriesDirPath is the path to registries.d. -// DO NOT change this, instead see systemRegistriesDirPath above. -const builtinRegistriesDirPath = etcDir + "/containers/registries.d" - -// userRegistriesDirPath is the path to the per user registries.d. -var userRegistriesDir = filepath.FromSlash(".config/containers/registries.d") - -// defaultUserDockerDir is the default lookaside directory for unprivileged user -var defaultUserDockerDir = filepath.FromSlash(".local/share/containers/sigstore") - -// defaultDockerDir is the default lookaside directory for root -var defaultDockerDir = "/var/lib/containers/sigstore" - -// registryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all. -// NOTE: Keep this in sync with docs/registries.d.md! -type registryConfiguration struct { - DefaultDocker *registryNamespace `yaml:"default-docker"` - // The key is a namespace, using fully-expanded Docker reference format or parent namespaces (per dockerReference.PolicyConfiguration*), - Docker map[string]registryNamespace `yaml:"docker"` -} - -// registryNamespace defines lookaside locations for a single namespace. 
-type registryNamespace struct { - Lookaside string `yaml:"lookaside"` // For reading, and if LookasideStaging is not present, for writing. - LookasideStaging string `yaml:"lookaside-staging"` // For writing only. - SigStore string `yaml:"sigstore"` // For compatibility, deprecated in favor of Lookaside. - SigStoreStaging string `yaml:"sigstore-staging"` // For compatibility, deprecated in favor of LookasideStaging. - UseSigstoreAttachments *bool `yaml:"use-sigstore-attachments,omitempty"` -} - -// lookasideStorageBase is an "opaque" type representing a lookaside Docker signature storage. -// Users outside of this file should use SignatureStorageBaseURL and lookasideStorageURL below. -type lookasideStorageBase *url.URL - -// SignatureStorageBaseURL reads configuration to find an appropriate lookaside storage URL for ref, for write access if “write”. -// the usage of the BaseURL is defined under docker/distribution registries—separate storage of docs/signature-protocols.md -// Warning: This function only exposes configuration in registries.d; -// just because this function returns an URL does not mean that the URL will be used by c/image/docker (e.g. if the registry natively supports X-R-S-S). -func SignatureStorageBaseURL(sys *types.SystemContext, ref types.ImageReference, write bool) (*url.URL, error) { - dr, ok := ref.(dockerReference) - if !ok { - return nil, errors.New("ref must be a dockerReference") - } - config, err := loadRegistryConfiguration(sys) - if err != nil { - return nil, err - } - - return config.lookasideStorageBaseURL(dr, write) -} - -// loadRegistryConfiguration returns a registryConfiguration appropriate for sys. -func loadRegistryConfiguration(sys *types.SystemContext) (*registryConfiguration, error) { - dirPath := registriesDirPath(sys) - logrus.Debugf(`Using registries.d directory %s`, dirPath) - return loadAndMergeConfig(dirPath) -} - -// registriesDirPath returns a path to registries.d -func registriesDirPath(sys *types.SystemContext) string { - return registriesDirPathWithHomeDir(sys, homedir.Get()) -} - -// registriesDirPathWithHomeDir is an internal implementation detail of registriesDirPath, -// it exists only to allow testing it with an artificial home directory. 
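// A hedged sketch of how a registries.d entry (keys per the structs above) maps
// to a lookaside base URL. The file contents and registry names are illustrative
// assumptions; docker.ParseReference is the transport's public parser.
//
// Sample /etc/containers/registries.d/example.yaml:
//
//	default-docker:
//	  lookaside: "https://sigs.example.com/lookaside"
//	docker:
//	  registry.example.com/ns:
//	    lookaside-staging: "file:///var/lib/sigstore-staging"
//	    use-sigstore-attachments: true
package main

import (
	"fmt"

	"go.podman.io/image/v5/docker"
	"go.podman.io/image/v5/types"
)

func main() {
	ref, err := docker.ParseReference("//registry.example.com/ns/app:latest")
	if err != nil {
		panic(err)
	}
	// RegistriesDirPath overrides the per-user and system directories that the
	// resolution logic below would otherwise consult.
	sys := &types.SystemContext{RegistriesDirPath: "/etc/containers/registries.d"}
	// write=true prefers the staging location (lookaside-staging above).
	base, err := docker.SignatureStorageBaseURL(sys, ref, true)
	if err != nil {
		panic(err)
	}
	// With the sample file above: file:///var/lib/sigstore-staging/ns/app
	fmt.Println(base.Redacted())
}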
-func registriesDirPathWithHomeDir(sys *types.SystemContext, homeDir string) string { - if sys != nil && sys.RegistriesDirPath != "" { - return sys.RegistriesDirPath - } - userRegistriesDirPath := filepath.Join(homeDir, userRegistriesDir) - if err := fileutils.Exists(userRegistriesDirPath); err == nil { - return userRegistriesDirPath - } - if sys != nil && sys.RootForImplicitAbsolutePaths != "" { - return filepath.Join(sys.RootForImplicitAbsolutePaths, systemRegistriesDirPath) - } - - return systemRegistriesDirPath -} - -// loadAndMergeConfig loads configuration files in dirPath -// FIXME: Probably rename to loadRegistryConfigurationForPath -func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) { - mergedConfig := registryConfiguration{Docker: map[string]registryNamespace{}} - dockerDefaultMergedFrom := "" - nsMergedFrom := map[string]string{} - - dir, err := os.Open(dirPath) - if err != nil { - if os.IsNotExist(err) { - return &mergedConfig, nil - } - return nil, err - } - configNames, err := dir.Readdirnames(0) - if err != nil { - return nil, err - } - for _, configName := range configNames { - if !strings.HasSuffix(configName, ".yaml") { - continue - } - configPath := filepath.Join(dirPath, configName) - configBytes, err := os.ReadFile(configPath) - if err != nil { - if errors.Is(err, fs.ErrNotExist) { - // file must have been removed between the directory listing - // and the open call, ignore that as it is a expected race - continue - } - return nil, err - } - - var config registryConfiguration - err = yaml.Unmarshal(configBytes, &config) - if err != nil { - return nil, fmt.Errorf("parsing %s: %w", configPath, err) - } - - if config.DefaultDocker != nil { - if mergedConfig.DefaultDocker != nil { - return nil, fmt.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in %q and %q`, - dockerDefaultMergedFrom, configPath) - } - mergedConfig.DefaultDocker = config.DefaultDocker - dockerDefaultMergedFrom = configPath - } - - for nsName, nsConfig := range config.Docker { // includes config.Docker == nil - if _, ok := mergedConfig.Docker[nsName]; ok { - return nil, fmt.Errorf(`Error parsing signature storage configuration: "docker" namespace %q defined both in %q and %q`, - nsName, nsMergedFrom[nsName], configPath) - } - mergedConfig.Docker[nsName] = nsConfig - nsMergedFrom[nsName] = configPath - } - } - - return &mergedConfig, nil -} - -// lookasideStorageBaseURL returns an appropriate signature storage URL for ref, for write access if “write”. -// the usage of the BaseURL is defined under docker/distribution registries—separate storage of docs/signature-protocols.md -func (config *registryConfiguration) lookasideStorageBaseURL(dr dockerReference, write bool) (*url.URL, error) { - topLevel := config.signatureTopLevel(dr, write) - var baseURL *url.URL - if topLevel != "" { - u, err := url.Parse(topLevel) - if err != nil { - return nil, fmt.Errorf("Invalid signature storage URL %s: %w", topLevel, err) - } - baseURL = u - } else { - // returns default directory if no lookaside specified in configuration file - baseURL = builtinDefaultLookasideStorageDir(rootless.GetRootlessEUID()) - logrus.Debugf(" No signature storage configuration found for %s, using built-in default %s", dr.PolicyConfigurationIdentity(), baseURL.Redacted()) - } - // NOTE: Keep this in sync with docs/signature-protocols.md! - // FIXME? Restrict to explicitly supported schemes? - repo := reference.Path(dr.ref) // Note that this is without a tag or digest. 
- if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references - return nil, fmt.Errorf("Unexpected path elements in Docker reference %s for signature storage", dr.ref.String()) - } - baseURL.Path = baseURL.Path + "/" + repo - return baseURL, nil -} - -// builtinDefaultLookasideStorageDir returns default signature storage URL as per euid -func builtinDefaultLookasideStorageDir(euid int) *url.URL { - if euid != 0 { - return &url.URL{Scheme: "file", Path: filepath.Join(homedir.Get(), defaultUserDockerDir)} - } - return &url.URL{Scheme: "file", Path: defaultDockerDir} -} - -// config.signatureTopLevel returns an URL string configured in config for ref, for write access if “write”. -// (the top level of the storage, namespaced by repo.FullName etc.), or "" if nothing has been configured. -func (config *registryConfiguration) signatureTopLevel(ref dockerReference, write bool) string { - if config.Docker != nil { - // Look for a full match. - identity := ref.PolicyConfigurationIdentity() - if ns, ok := config.Docker[identity]; ok { - logrus.Debugf(` Lookaside configuration: using "docker" namespace %s`, identity) - if ret := ns.signatureTopLevel(write); ret != "" { - return ret - } - } - - // Look for a match of the possible parent namespaces. - for _, name := range ref.PolicyConfigurationNamespaces() { - if ns, ok := config.Docker[name]; ok { - logrus.Debugf(` Lookaside configuration: using "docker" namespace %s`, name) - if ret := ns.signatureTopLevel(write); ret != "" { - return ret - } - } - } - } - // Look for a default location - if config.DefaultDocker != nil { - logrus.Debugf(` Lookaside configuration: using "default-docker" configuration`) - if ret := config.DefaultDocker.signatureTopLevel(write); ret != "" { - return ret - } - } - return "" -} - -// config.useSigstoreAttachments returns whether we should look for and write sigstore attachments. -// for ref. -func (config *registryConfiguration) useSigstoreAttachments(ref dockerReference) bool { - if config.Docker != nil { - // Look for a full match. - identity := ref.PolicyConfigurationIdentity() - if ns, ok := config.Docker[identity]; ok { - logrus.Debugf(` Sigstore attachments: using "docker" namespace %s`, identity) - if ns.UseSigstoreAttachments != nil { - return *ns.UseSigstoreAttachments - } - } - - // Look for a match of the possible parent namespaces. - for _, name := range ref.PolicyConfigurationNamespaces() { - if ns, ok := config.Docker[name]; ok { - logrus.Debugf(` Sigstore attachments: using "docker" namespace %s`, name) - if ns.UseSigstoreAttachments != nil { - return *ns.UseSigstoreAttachments - } - } - } - } - // Look for a default location - if config.DefaultDocker != nil { - logrus.Debugf(` Sigstore attachments: using "default-docker" configuration`) - if config.DefaultDocker.UseSigstoreAttachments != nil { - return *config.DefaultDocker.UseSigstoreAttachments - } - } - return false -} - -// ns.signatureTopLevel returns an URL string configured in ns for ref, for write access if “write”. -// or "" if nothing has been configured. 
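// The per-namespace precedence implemented below, summarized: for writes,
// "lookaside-staging", then the deprecated "sigstore-staging"; then, for both
// reads and writes, "lookaside", then the deprecated "sigstore". A small
// standalone sketch of the signature URL shape that the unexported
// lookasideStorageURL helper (further below) derives from a base, reproduced
// here with public APIs only (base URL and digest are illustrative):
package main

import (
	"fmt"
	"net/url"

	"github.com/opencontainers/go-digest"
)

func main() {
	base, _ := url.Parse("https://sigs.example.com/lookaside/ns/app")
	d := digest.Digest("sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
	sigURL := *base
	// Mirrors the helper's format: <base>@<algorithm>=<hex>/signature-<n>, 1-based.
	sigURL.Path = fmt.Sprintf("%s@%s=%s/signature-%d", sigURL.Path, d.Algorithm(), d.Encoded(), 1)
	fmt.Println(sigURL.String()) // ...ns/app@sha256=<hex>/signature-1
}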
-func (ns registryNamespace) signatureTopLevel(write bool) string { - if write { - if ns.LookasideStaging != "" { - logrus.Debugf(` Using "lookaside-staging" %s`, ns.LookasideStaging) - return ns.LookasideStaging - } - if ns.SigStoreStaging != "" { - logrus.Debugf(` Using "sigstore-staging" %s`, ns.SigStoreStaging) - return ns.SigStoreStaging - } - } - if ns.Lookaside != "" { - logrus.Debugf(` Using "lookaside" %s`, ns.Lookaside) - return ns.Lookaside - } - if ns.SigStore != "" { - logrus.Debugf(` Using "sigstore" %s`, ns.SigStore) - return ns.SigStore - } - return "" -} - -// lookasideStorageURL returns an URL usable for accessing signature index in base with known manifestDigest. -// base is not nil from the caller -// NOTE: Keep this in sync with docs/signature-protocols.md! -func lookasideStorageURL(base lookasideStorageBase, manifestDigest digest.Digest, index int) (*url.URL, error) { - if err := manifestDigest.Validate(); err != nil { // digest.Digest.Encoded() panics on failure, and could possibly result in a path with ../, so validate explicitly. - return nil, err - } - sigURL := *base - sigURL.Path = fmt.Sprintf("%s@%s=%s/signature-%d", sigURL.Path, manifestDigest.Algorithm(), manifestDigest.Encoded(), index+1) - return &sigURL, nil -} diff --git a/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go b/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go deleted file mode 100644 index f5fed07b89..0000000000 --- a/vendor/github.com/containers/image/v5/docker/wwwauthenticate.go +++ /dev/null @@ -1,175 +0,0 @@ -package docker - -// Based on github.com/docker/distribution/registry/client/auth/authchallenge.go, primarily stripping unnecessary dependencies. - -import ( - "fmt" - "iter" - "net/http" - "strings" -) - -// challenge carries information from a WWW-Authenticate response header. -// See RFC 7235. -type challenge struct { - // Scheme is the auth-scheme according to RFC 7235 - Scheme string - - // Parameters are the auth-params according to RFC 7235 - Parameters map[string]string -} - -// Octet types from RFC 7230. -type octetType byte - -var octetTypes [256]octetType - -const ( - isToken octetType = 1 << iota - isSpace -) - -func init() { - // OCTET = - // CHAR = - // CTL = - // CR = - // LF = - // SP = - // HT = - // <"> = - // CRLF = CR LF - // LWS = [CRLF] 1*( SP | HT ) - // TEXT = - // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> - // | "/" | "[" | "]" | "?" 
| "=" | "{" | "}" | SP | HT - // token = 1* - // qdtext = > - - for c := 0; c < 256; c++ { - var t octetType - isCtl := c <= 31 || c == 127 - isChar := 0 <= c && c <= 127 - isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) - if strings.ContainsRune(" \t\r\n", rune(c)) { - t |= isSpace - } - if isChar && !isCtl && !isSeparator { - t |= isToken - } - octetTypes[c] = t - } -} - -func iterateAuthHeader(header http.Header) iter.Seq[challenge] { - return func(yield func(challenge) bool) { - for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { - v, p := parseValueAndParams(h) - if v != "" { - if !yield(challenge{Scheme: v, Parameters: p}) { - return - } - } - } - } -} - -// parseAuthScope parses an authentication scope string of the form `$resource:$remote:$actions` -func parseAuthScope(scopeStr string) (*authScope, error) { - if parts := strings.Split(scopeStr, ":"); len(parts) == 3 { - return &authScope{ - resourceType: parts[0], - remoteName: parts[1], - actions: parts[2], - }, nil - } - return nil, fmt.Errorf("error parsing auth scope: '%s'", scopeStr) -} - -// NOTE: This is not a fully compliant parser per RFC 7235: -// Most notably it does not support more than one challenge within a single header -// Some of the whitespace parsing also seems noncompliant. -// But it is clearly better than what we used to have… -func parseValueAndParams(header string) (value string, params map[string]string) { - params = make(map[string]string) - value, s := expectToken(header) - if value == "" { - return - } - value = strings.ToLower(value) - s = "," + skipSpace(s) - for strings.HasPrefix(s, ",") { - var pkey string - pkey, s = expectToken(skipSpace(s[1:])) - if pkey == "" { - return - } - if !strings.HasPrefix(s, "=") { - return - } - var pvalue string - pvalue, s = expectTokenOrQuoted(s[1:]) - if pvalue == "" { - return - } - pkey = strings.ToLower(pkey) - params[pkey] = pvalue - s = skipSpace(s) - } - return -} - -func skipSpace(s string) (rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isSpace == 0 { - break - } - } - return s[i:] -} - -func expectToken(s string) (token, rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isToken == 0 { - break - } - } - return s[:i], s[i:] -} - -func expectTokenOrQuoted(s string) (value string, rest string) { - if !strings.HasPrefix(s, "\"") { - return expectToken(s) - } - s = s[1:] - for i := 0; i < len(s); i++ { - switch s[i] { - case '"': - return s[:i], s[i+1:] - case '\\': - p := make([]byte, len(s)-1) - j := copy(p, s[:i]) - escape := true - for i++; i < len(s); i++ { - b := s[i] - switch { - case escape: - escape = false - p[j] = b - j++ - case b == '\\': - escape = true - case b == '"': - return string(p[:j]), s[i+1:] - default: - p[j] = b - j++ - } - } - return "", "" - } - } - return "", "" -} diff --git a/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go b/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go deleted file mode 100644 index f31ee3124d..0000000000 --- a/vendor/github.com/containers/image/v5/internal/blobinfocache/blobinfocache.go +++ /dev/null @@ -1,55 +0,0 @@ -package blobinfocache - -import ( - "github.com/containers/image/v5/types" - digest "github.com/opencontainers/go-digest" -) - -// FromBlobInfoCache returns a BlobInfoCache2 based on a BlobInfoCache, returning the original -// object if it implements BlobInfoCache2, or a wrapper which discards compression information -// if it only implements 
BlobInfoCache. -func FromBlobInfoCache(bic types.BlobInfoCache) BlobInfoCache2 { - if bic2, ok := bic.(BlobInfoCache2); ok { - return bic2 - } - return &v1OnlyBlobInfoCache{ - BlobInfoCache: bic, - } -} - -type v1OnlyBlobInfoCache struct { - types.BlobInfoCache -} - -func (bic *v1OnlyBlobInfoCache) Open() { -} - -func (bic *v1OnlyBlobInfoCache) Close() { -} - -func (bic *v1OnlyBlobInfoCache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest { - return "" -} - -func (bic *v1OnlyBlobInfoCache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) { -} - -func (bic *v1OnlyBlobInfoCache) RecordDigestCompressorData(anyDigest digest.Digest, data DigestCompressorData) { -} - -func (bic *v1OnlyBlobInfoCache) CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options CandidateLocations2Options) []BICReplacementCandidate2 { - return nil -} - -// CandidateLocationsFromV2 converts a slice of BICReplacementCandidate2 to a slice of -// types.BICReplacementCandidate, dropping compression information. -func CandidateLocationsFromV2(v2candidates []BICReplacementCandidate2) []types.BICReplacementCandidate { - candidates := make([]types.BICReplacementCandidate, 0, len(v2candidates)) - for _, c := range v2candidates { - candidates = append(candidates, types.BICReplacementCandidate{ - Digest: c.Digest, - Location: c.Location, - }) - } - return candidates -} diff --git a/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go b/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go deleted file mode 100644 index acf82ee639..0000000000 --- a/vendor/github.com/containers/image/v5/internal/blobinfocache/types.go +++ /dev/null @@ -1,81 +0,0 @@ -package blobinfocache - -import ( - compressiontypes "github.com/containers/image/v5/pkg/compression/types" - "github.com/containers/image/v5/types" - digest "github.com/opencontainers/go-digest" -) - -const ( - // Uncompressed is the value we store in a blob info cache to indicate that we know that - // the blob in the corresponding location is not compressed. - Uncompressed = "uncompressed" - // UnknownCompression is the value we store in a blob info cache to indicate that we don't - // know if the blob in the corresponding location is compressed (and if so, how) or not. - UnknownCompression = "unknown" -) - -// BlobInfoCache2 extends BlobInfoCache by adding the ability to track information about what kind -// of compression was applied to the blobs it keeps information about. -type BlobInfoCache2 interface { - types.BlobInfoCache - - // Open() sets up the cache for future accesses, potentially acquiring costly state. Each Open() must be paired with a Close(). - // Note that public callers may call the types.BlobInfoCache operations without Open()/Close(). - Open() - // Close destroys state created by Open(). - Close() - - // UncompressedDigestForTOC returns an uncompressed digest corresponding to anyDigest. - // Returns "" if the uncompressed digest is unknown. - UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest - // RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed. - // WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g. - // because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs. 
- // (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
- RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest)
-
- // RecordDigestCompressorData records data for the blob with the specified digest.
- // WARNING: Only call this with LOCALLY VERIFIED data:
- // - don’t record a compressor for a digest just because some remote author claims so
- // (e.g. because a manifest says so);
- // - don’t record the non-base variant or annotations if we are not _sure_ that the base variant
- // and the blob’s digest match the non-base variant’s annotations (e.g. because we saw them
- // in a manifest);
- // otherwise the cache could be poisoned and cause us to make incorrect edits to type
- // information in a manifest.
- RecordDigestCompressorData(anyDigest digest.Digest, data DigestCompressorData)
- // CandidateLocations2 returns a prioritized, limited number of blobs and their locations (if known)
- // that could possibly be reused within the specified transport scope (if they still
- // exist, which is not guaranteed).
- CandidateLocations2(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, options CandidateLocations2Options) []BICReplacementCandidate2
-}
-
-// DigestCompressorData is information known about how a blob is compressed.
-// (This is worded generically, but basically targeted at the zstd / zstd:chunked situation.)
-type DigestCompressorData struct {
- BaseVariantCompressor string // A compressor’s base variant name, or Uncompressed or UnknownCompression.
- // The following fields are only valid if the base variant is neither Uncompressed nor UnknownCompression:
- SpecificVariantCompressor string // A non-base variant compressor (or UnknownCompression if the true format is just the base variant)
- SpecificVariantAnnotations map[string]string // Annotations required to benefit from the base variant.
-}
-
-// CandidateLocations2Options are used in CandidateLocations2.
-type CandidateLocations2Options struct {
- // If !CanSubstitute, the returned candidates will match the submitted digest exactly; if
- // CanSubstitute, data from previous RecordDigestUncompressedPair calls is used to also look
- // up variants of the blob which have the same uncompressed digest.
- CanSubstitute bool
- PossibleManifestFormats []string // If set, a set of possible manifest formats; at least one should support the reused layer
- RequiredCompression *compressiontypes.Algorithm // If set, only reuse layers with a matching algorithm
-}
-
-// BICReplacementCandidate2 is an item returned by BlobInfoCache2.CandidateLocations2.
-type BICReplacementCandidate2 struct { - Digest digest.Digest - CompressionOperation types.LayerCompression // Either types.Decompress for uncompressed, or types.Compress for compressed - CompressionAlgorithm *compressiontypes.Algorithm // An algorithm when the candidate is compressed, or nil when it is uncompressed - CompressionAnnotations map[string]string // If necessary, annotations necessary to use CompressionAlgorithm - UnknownLocation bool // is true when `Location` for this blob is not set - Location types.BICLocationReference // not set if UnknownLocation is set to `true` -} diff --git a/vendor/github.com/containers/image/v5/internal/image/docker_list.go b/vendor/github.com/containers/image/v5/internal/image/docker_list.go deleted file mode 100644 index 617a451aa9..0000000000 --- a/vendor/github.com/containers/image/v5/internal/image/docker_list.go +++ /dev/null @@ -1,34 +0,0 @@ -package image - -import ( - "context" - "fmt" - - "github.com/containers/image/v5/internal/manifest" - "github.com/containers/image/v5/types" -) - -func manifestSchema2FromManifestList(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) { - list, err := manifest.Schema2ListFromManifest(manblob) - if err != nil { - return nil, fmt.Errorf("parsing schema2 manifest list: %w", err) - } - targetManifestDigest, err := list.ChooseInstance(sys) - if err != nil { - return nil, fmt.Errorf("choosing image instance: %w", err) - } - manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest) - if err != nil { - return nil, fmt.Errorf("fetching target platform image selected from manifest list: %w", err) - } - - matches, err := manifest.MatchesDigest(manblob, targetManifestDigest) - if err != nil { - return nil, fmt.Errorf("computing manifest digest: %w", err) - } - if !matches { - return nil, fmt.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest) - } - - return manifestInstanceFromBlob(ctx, sys, src, manblob, mt) -} diff --git a/vendor/github.com/containers/image/v5/internal/image/docker_schema1.go b/vendor/github.com/containers/image/v5/internal/image/docker_schema1.go deleted file mode 100644 index 3ef8e144d7..0000000000 --- a/vendor/github.com/containers/image/v5/internal/image/docker_schema1.go +++ /dev/null @@ -1,257 +0,0 @@ -package image - -import ( - "context" - "fmt" - - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/types" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" -) - -type manifestSchema1 struct { - m *manifest.Schema1 -} - -func manifestSchema1FromManifest(manifestBlob []byte) (genericManifest, error) { - m, err := manifest.Schema1FromManifest(manifestBlob) - if err != nil { - return nil, err - } - return &manifestSchema1{m: m}, nil -} - -// manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data. 
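// The list-resolution step above has public counterparts in the manifest
// package; a hedged standalone sketch (rawList is assumed to hold an already
// fetched manifest-list blob):
package listdemo // hypothetical helper package

import (
	"fmt"

	"go.podman.io/image/v5/manifest"
	"go.podman.io/image/v5/types"
)

func pickInstance(rawList []byte, sys *types.SystemContext) error {
	list, err := manifest.Schema2ListFromManifest(rawList)
	if err != nil {
		return err
	}
	// ChooseInstance applies sys's platform preferences (OS, architecture, variant).
	instanceDigest, err := list.ChooseInstance(sys)
	if err != nil {
		return err
	}
	fmt.Println("selected instance:", instanceDigest)
	return nil
}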
-func manifestSchema1FromComponents(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History, architecture string) (genericManifest, error) {
- m, err := manifest.Schema1FromComponents(ref, fsLayers, history, architecture)
- if err != nil {
- return nil, err
- }
- return &manifestSchema1{m: m}, nil
-}
-
-func (m *manifestSchema1) serialize() ([]byte, error) {
- return m.m.Serialize()
-}
-
-func (m *manifestSchema1) manifestMIMEType() string {
- return manifest.DockerV2Schema1SignedMediaType
-}
-
-// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
-// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
-func (m *manifestSchema1) ConfigInfo() types.BlobInfo {
- return m.m.ConfigInfo()
-}
-
-// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
-// The result is cached; it is OK to call this however often you need.
-func (m *manifestSchema1) ConfigBlob(context.Context) ([]byte, error) {
- return nil, nil
-}
-
-// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
-// layers in the resulting configuration isn't guaranteed to be returned due to how
-// old image manifests work (docker v2s1 especially).
-func (m *manifestSchema1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
- v2s2, err := m.convertToManifestSchema2(ctx, &types.ManifestUpdateOptions{})
- if err != nil {
- return nil, err
- }
- return v2s2.OCIConfig(ctx)
-}
-
-// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (m *manifestSchema1) LayerInfos() []types.BlobInfo {
- return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
-}
-
-// EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
-// It returns false if the manifest does not embed a Docker reference.
-// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
-func (m *manifestSchema1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
- // This is a bit convoluted: We can’t just have a "get embedded docker reference" method
- // and have the “does it conflict” logic in the generic copy code, because the manifest does not actually
- // embed a full docker/distribution reference, but only the repo name and tag (without the host name).
- // So we would have to provide a “return repo without host name, and tag” getter for the generic code,
- // which would be very awkward. Instead, we do the matching here in schema1-specific code, and all the
- // generic copy code needs to know about is reference.Named and that a manifest may need updating
- // for some destinations.
- name := reference.Path(ref)
- var tag string
- if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
- tag = tagged.Tag()
- } else {
- tag = ""
- }
- return m.m.Name != name || m.m.Tag != tag
-}
-
-// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
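// For context, the public route to this information goes through the docker
// transport and types.ImageCloser; a hedged sketch (the reference is
// illustrative, and a real call needs network access to the registry):
package main

import (
	"context"
	"fmt"

	"go.podman.io/image/v5/docker"
	"go.podman.io/image/v5/types"
)

func main() {
	ctx := context.Background()
	ref, err := docker.ParseReference("//quay.io/podman/stable:latest")
	if err != nil {
		panic(err)
	}
	img, err := ref.NewImage(ctx, &types.SystemContext{})
	if err != nil {
		panic(err)
	}
	defer img.Close()
	// Inspect dispatches to the per-schema implementations in this file.
	info, err := img.Inspect(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println(info.Architecture, info.Os, len(info.Layers))
}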
-func (m *manifestSchema1) Inspect(context.Context) (*types.ImageInspectInfo, error) { - return m.m.Inspect(nil) -} - -// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. -// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute -// (most importantly it forces us to download the full layers even if they are already present at the destination). -func (m *manifestSchema1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool { - return (options.ManifestMIMEType == manifest.DockerV2Schema2MediaType || options.ManifestMIMEType == imgspecv1.MediaTypeImageManifest) -} - -// UpdatedImage returns a types.Image modified according to options. -// This does not change the state of the original Image object. -func (m *manifestSchema1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) { - copy := manifestSchema1{m: manifest.Schema1Clone(m.m)} - - // We have 2 MIME types for schema 1, which are basically equivalent (even the un-"Signed" MIME type will be rejected if there isn’t a signature; so, - // handle conversions between them by doing nothing. - if options.ManifestMIMEType != manifest.DockerV2Schema1MediaType && options.ManifestMIMEType != manifest.DockerV2Schema1SignedMediaType { - converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{ - imgspecv1.MediaTypeImageManifest: copy.convertToManifestOCI1, - manifest.DockerV2Schema2MediaType: copy.convertToManifestSchema2Generic, - }) - if err != nil { - return nil, err - } - - if converted != nil { - return converted, nil - } - } - - // No conversion required, update manifest - if options.LayerInfos != nil { - if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { - return nil, err - } - } - if options.EmbeddedDockerReference != nil { - copy.m.Name = reference.Path(options.EmbeddedDockerReference) - if tagged, isTagged := options.EmbeddedDockerReference.(reference.NamedTagged); isTagged { - copy.m.Tag = tagged.Tag() - } else { - copy.m.Tag = "" - } - } - - return memoryImageFromManifest(©), nil -} - -// convertToManifestSchema2Generic returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType. -// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned -// value. -// This does not change the state of the original manifestSchema1 object. -// -// We need this function just because a function returning an implementation of the genericManifest -// interface is not automatically assignable to a function type returning the genericManifest interface -func (m *manifestSchema1) convertToManifestSchema2Generic(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { - return m.convertToManifestSchema2(ctx, options) -} - -// convertToManifestSchema2 returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType. -// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned -// value. -// This does not change the state of the original manifestSchema1 object. 
-// -// Based on github.com/docker/docker/distribution/pull_v2.go -func (m *manifestSchema1) convertToManifestSchema2(_ context.Context, options *types.ManifestUpdateOptions) (*manifestSchema2, error) { - uploadedLayerInfos := options.InformationOnly.LayerInfos - layerDiffIDs := options.InformationOnly.LayerDiffIDs - - if len(m.m.ExtractedV1Compatibility) == 0 { - // What would this even mean?! Anyhow, the rest of the code depends on FSLayers[0] and ExtractedV1Compatibility[0] existing. - return nil, fmt.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType) - } - if len(m.m.ExtractedV1Compatibility) != len(m.m.FSLayers) { - return nil, fmt.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.ExtractedV1Compatibility), len(m.m.FSLayers)) - } - if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.m.FSLayers) { - return nil, fmt.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers)) - } - if layerDiffIDs != nil && len(layerDiffIDs) != len(m.m.FSLayers) { - return nil, fmt.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.m.FSLayers)) - } - - var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil - if options.LayerInfos != nil { - if len(options.LayerInfos) != len(m.m.FSLayers) { - return nil, fmt.Errorf("Error converting image: layer edits for %d layers vs %d existing layers", - len(options.LayerInfos), len(m.m.FSLayers)) - } - convertedLayerUpdates = []types.BlobInfo{} - } - - // Build a list of the diffIDs for the non-empty layers. - diffIDs := []digest.Digest{} - var layers []manifest.Schema2Descriptor - for v1Index := len(m.m.ExtractedV1Compatibility) - 1; v1Index >= 0; v1Index-- { - v2Index := (len(m.m.ExtractedV1Compatibility) - 1) - v1Index - - if !m.m.ExtractedV1Compatibility[v1Index].ThrowAway { - var size int64 - if uploadedLayerInfos != nil { - size = uploadedLayerInfos[v2Index].Size - } - var d digest.Digest - if layerDiffIDs != nil { - d = layerDiffIDs[v2Index] - } - layers = append(layers, manifest.Schema2Descriptor{ - MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", - Size: size, - Digest: m.m.FSLayers[v1Index].BlobSum, - }) - if options.LayerInfos != nil { - convertedLayerUpdates = append(convertedLayerUpdates, options.LayerInfos[v2Index]) - } - diffIDs = append(diffIDs, d) - } - } - configJSON, err := m.m.ToSchema2Config(diffIDs) - if err != nil { - return nil, err - } - configDescriptor := manifest.Schema2Descriptor{ - MediaType: "application/vnd.docker.container.image.v1+json", - Size: int64(len(configJSON)), - Digest: digest.FromBytes(configJSON), - } - - if options.LayerInfos != nil { - options.LayerInfos = convertedLayerUpdates - } - return manifestSchema2FromComponents(configDescriptor, nil, configJSON, layers), nil -} - -// convertToManifestOCI1 returns a genericManifest implementation converted to imgspecv1.MediaTypeImageManifest. -// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned -// value. -// This does not change the state of the original manifestSchema1 object. 
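// Callers reach this conversion chain through the public types.Image API; a
// hedged sketch (the image value is assumed to come from the caller, and a
// schema1 source may additionally need InformationOnly.LayerDiffIDs, per
// UpdatedImageNeedsLayerDiffIDs above):
package conversion // hypothetical helper package

import (
	"context"

	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"

	"go.podman.io/image/v5/types"
)

// ToOCI asks the image to re-serialize itself as an OCI manifest; for a
// schema1 source this transits through schema2 internally, as described above.
func ToOCI(ctx context.Context, img types.Image) (types.Image, error) {
	return img.UpdatedImage(ctx, types.ManifestUpdateOptions{
		ManifestMIMEType: imgspecv1.MediaTypeImageManifest,
	})
}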
-func (m *manifestSchema1) convertToManifestOCI1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { - // We can't directly convert to OCI, but we can transitively convert via a Docker V2.2 Distribution manifest - m2, err := m.convertToManifestSchema2(ctx, options) - if err != nil { - return nil, err - } - - return m2.convertToManifestOCI1(ctx, options) -} - -// SupportsEncryption returns if encryption is supported for the manifest type -func (m *manifestSchema1) SupportsEncryption(context.Context) bool { - return false -} - -// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image -// (and the code can handle that). -// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted -// algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts -// to a different manifest format). -func (m *manifestSchema1) CanChangeLayerCompression(mimeType string) bool { - return true // There are no MIME types in the manifest, so we must assume a valid image. -} diff --git a/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go b/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go deleted file mode 100644 index 01219e3919..0000000000 --- a/vendor/github.com/containers/image/v5/internal/image/docker_schema2.go +++ /dev/null @@ -1,413 +0,0 @@ -package image - -import ( - "bytes" - "context" - "crypto/sha256" - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "strings" - - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/internal/iolimits" - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/pkg/blobinfocache/none" - "github.com/containers/image/v5/types" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" -) - -// GzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes) -// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is -// a non-zero embedded timestamp; we could zero that, but that would just waste storage space -// in registries, so let’s use the same values. -// -// This is publicly visible as c/image/image.GzippedEmptyLayer. -var GzippedEmptyLayer = []byte{ - 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88, - 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0, -} - -// GzippedEmptyLayerDigest is a digest of GzippedEmptyLayer -// -// This is publicly visible as c/image/image.GzippedEmptyLayerDigest. -const GzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") - -type manifestSchema2 struct { - src types.ImageSource // May be nil if configBlob is not nil - configBlob []byte // If set, corresponds to contents of ConfigDescriptor. 
- m *manifest.Schema2
-}
-
-func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) {
- m, err := manifest.Schema2FromManifest(manifestBlob)
- if err != nil {
- return nil, err
- }
- return &manifestSchema2{
- src: src,
- m: m,
- }, nil
-}
-
-// manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data:
-func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) *manifestSchema2 {
- return &manifestSchema2{
- src: src,
- configBlob: configBlob,
- m: manifest.Schema2FromComponents(config, layers),
- }
-}
-
-func (m *manifestSchema2) serialize() ([]byte, error) {
- return m.m.Serialize()
-}
-
-func (m *manifestSchema2) manifestMIMEType() string {
- return m.m.MediaType
-}
-
-// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
-// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
-func (m *manifestSchema2) ConfigInfo() types.BlobInfo {
- return m.m.ConfigInfo()
-}
-
-// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
-// layers in the resulting configuration isn't guaranteed to be returned due to how
-// old image manifests work (docker v2s1 especially).
-func (m *manifestSchema2) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
- configBlob, err := m.ConfigBlob(ctx)
- if err != nil {
- return nil, err
- }
- // docker v2s2 and OCI v1 are mostly compatible but v2s2 contains more fields
- // than OCI v1. This unmarshal makes sure we drop docker v2s2
- // fields that aren't needed in OCI v1.
- configOCI := &imgspecv1.Image{}
- if err := json.Unmarshal(configBlob, configOCI); err != nil {
- return nil, err
- }
- return configOCI, nil
-}
-
-// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
-// The result is cached; it is OK to call this however often you need.
-func (m *manifestSchema2) ConfigBlob(ctx context.Context) ([]byte, error) {
- if m.configBlob == nil {
- if m.src == nil {
- return nil, fmt.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
- }
- stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromSchema2Descriptor(m.m.ConfigDescriptor), none.NoCache)
- if err != nil {
- return nil, err
- }
- defer stream.Close()
- blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
- if err != nil {
- return nil, err
- }
- computedDigest := digest.FromBytes(blob)
- if computedDigest != m.m.ConfigDescriptor.Digest {
- return nil, fmt.Errorf("Downloaded config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest)
- }
- m.configBlob = blob
- }
- return m.configBlob, nil
-}
-
-// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (m *manifestSchema2) LayerInfos() []types.BlobInfo {
- return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
-}
-
-// EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
-// It returns false if the manifest does not embed a Docker reference.
-// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
-func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
- return false
-}
-
-// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
-func (m *manifestSchema2) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) {
- getter := func(info types.BlobInfo) ([]byte, error) {
- if info.Digest != m.ConfigInfo().Digest {
- // Shouldn't ever happen
- return nil, errors.New("asked for a different config blob")
- }
- config, err := m.ConfigBlob(ctx)
- if err != nil {
- return nil, err
- }
- return config, nil
- }
- return m.m.Inspect(getter)
-}
-
-// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
-// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive
-// (most importantly it forces us to download the full layers even if they are already present at the destination).
-func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
- return false
-}
-
-// UpdatedImage returns a types.Image modified according to options.
-// This does not change the state of the original Image object.
-// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError
-// if the CompressionOperation and CompressionAlgorithm specified in one or more
-// options.LayerInfos items is anything other than gzip.
-func (m *manifestSchema2) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
- copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc.
- src: m.src,
- configBlob: m.configBlob,
- m: manifest.Schema2Clone(m.m),
- }
-
- converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{
- manifest.DockerV2Schema1MediaType: copy.convertToManifestSchema1,
- manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1,
- imgspecv1.MediaTypeImageManifest: copy.convertToManifestOCI1,
- })
- if err != nil {
- return nil, err
- }
-
- if converted != nil {
- return converted, nil
- }
-
- // No conversion required, update manifest
- if options.LayerInfos != nil {
- if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
- return nil, err
- }
- }
- // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care.
-
- return memoryImageFromManifest(&copy), nil
-}
-
-func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor {
- return imgspecv1.Descriptor{
- MediaType: d.MediaType,
- Size: d.Size,
- Digest: d.Digest,
- URLs: d.URLs,
- }
-}
-
-// convertToManifestOCI1 returns a genericManifest implementation converted to imgspecv1.MediaTypeImageManifest.
-// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
-// value.
-// This does not change the state of the original manifestSchema2 object.
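ConfigBlob above pins the fetched config to the digest the manifest promised before caching it; the same go-digest pattern works standalone. A small sketch (a hypothetical helper; only the opencontainers/go-digest API is assumed):

    package main

    import (
    	"fmt"

    	"github.com/opencontainers/go-digest"
    )

    // verifyBlob recomputes the digest of fetched bytes and rejects the blob on
    // mismatch, mirroring the check ConfigBlob performs before caching.
    func verifyBlob(blob []byte, expected digest.Digest) error {
    	if computed := digest.FromBytes(blob); computed != expected {
    		return fmt.Errorf("digest %s does not match expected %s", computed, expected)
    	}
    	return nil
    }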
-func (m *manifestSchema2) convertToManifestOCI1(ctx context.Context, _ *types.ManifestUpdateOptions) (genericManifest, error) { - configOCI, err := m.OCIConfig(ctx) - if err != nil { - return nil, err - } - configOCIBytes, err := json.Marshal(configOCI) - if err != nil { - return nil, err - } - - config := imgspecv1.Descriptor{ - MediaType: imgspecv1.MediaTypeImageConfig, - Size: int64(len(configOCIBytes)), - Digest: digest.FromBytes(configOCIBytes), - } - - layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors)) - for idx := range layers { - layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx]) - switch m.m.LayersDescriptors[idx].MediaType { - case manifest.DockerV2Schema2ForeignLayerMediaType: - layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. - case manifest.DockerV2Schema2ForeignLayerMediaTypeGzip: - layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributableGzip //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. - case manifest.DockerV2SchemaLayerMediaTypeUncompressed: - layers[idx].MediaType = imgspecv1.MediaTypeImageLayer - case manifest.DockerV2Schema2LayerMediaType: - layers[idx].MediaType = imgspecv1.MediaTypeImageLayerGzip - default: - return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", m.m.LayersDescriptors[idx].MediaType) - } - } - - return manifestOCI1FromComponents(config, m.src, configOCIBytes, layers), nil -} - -// convertToManifestSchema1 returns a genericManifest implementation converted to manifest.DockerV2Schema1{Signed,}MediaType. -// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned -// value. -// This does not change the state of the original manifestSchema2 object. -// -// Based on docker/distribution/manifest/schema1/config_builder.go -func (m *manifestSchema2) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { - dest := options.InformationOnly.Destination - - var convertedLayerUpdates []types.BlobInfo // Only used if options.LayerInfos != nil - if options.LayerInfos != nil { - if len(options.LayerInfos) != len(m.m.LayersDescriptors) { - return nil, fmt.Errorf("Error converting image: layer edits for %d layers vs %d existing layers", - len(options.LayerInfos), len(m.m.LayersDescriptors)) - } - convertedLayerUpdates = []types.BlobInfo{} - } - - configBytes, err := m.ConfigBlob(ctx) - if err != nil { - return nil, err - } - imageConfig := &manifest.Schema2Image{} - if err := json.Unmarshal(configBytes, imageConfig); err != nil { - return nil, err - } - - // Build fsLayers and History, discarding all configs. We will patch the top-level config in later. - fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History)) - history := make([]manifest.Schema1History, len(imageConfig.History)) - nonemptyLayerIndex := 0 - var parentV1ID string // Set in the loop - v1ID := "" - haveGzippedEmptyLayer := false - if len(imageConfig.History) == 0 { - // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing. 
- return nil, fmt.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType) - } - for v2Index, historyEntry := range imageConfig.History { - parentV1ID = v1ID - v1Index := len(imageConfig.History) - 1 - v2Index - - var blobDigest digest.Digest - if historyEntry.EmptyLayer { - emptyLayerBlobInfo := types.BlobInfo{Digest: GzippedEmptyLayerDigest, Size: int64(len(GzippedEmptyLayer))} - - if !haveGzippedEmptyLayer { - logrus.Debugf("Uploading empty layer during conversion to schema 1") - // Ideally we should update the relevant BlobInfoCache about this layer, but that would require passing it down here, - // and anyway this blob is so small that it’s easier to just copy it than to worry about figuring out another location where to get it. - info, err := dest.PutBlob(ctx, bytes.NewReader(GzippedEmptyLayer), emptyLayerBlobInfo, none.NoCache, false) - if err != nil { - return nil, fmt.Errorf("uploading empty layer: %w", err) - } - if info.Digest != emptyLayerBlobInfo.Digest { - return nil, fmt.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, emptyLayerBlobInfo.Digest) - } - haveGzippedEmptyLayer = true - } - if options.LayerInfos != nil { - convertedLayerUpdates = append(convertedLayerUpdates, emptyLayerBlobInfo) - } - blobDigest = emptyLayerBlobInfo.Digest - } else { - if nonemptyLayerIndex >= len(m.m.LayersDescriptors) { - return nil, fmt.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors)) - } - if options.LayerInfos != nil { - convertedLayerUpdates = append(convertedLayerUpdates, options.LayerInfos[nonemptyLayerIndex]) - } - blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest - nonemptyLayerIndex++ - } - - // AFAICT pull ignores these ID values, at least nowadays, so we could use anything unique, including a simple counter. Use what Docker uses for cargo-cult consistency. - v, err := v1IDFromBlobDigestAndComponents(blobDigest, parentV1ID) - if err != nil { - return nil, err - } - v1ID = v - - fakeImage := manifest.Schema1V1Compatibility{ - ID: v1ID, - Parent: parentV1ID, - Comment: historyEntry.Comment, - Created: historyEntry.Created, - Author: historyEntry.Author, - ThrowAway: historyEntry.EmptyLayer, - } - fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy} - v1CompatibilityBytes, err := json.Marshal(&fakeImage) - if err != nil { - return nil, fmt.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage) - } - - fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest} - history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)} - // Note that parentV1ID of the top layer is preserved when exiting this loop - } - - // Now patch in real configuration for the top layer (v1Index == 0) - v1ID, err = v1IDFromBlobDigestAndComponents(fsLayers[0].BlobSum, parentV1ID, string(configBytes)) // See above WRT v1ID value generation and cargo-cult consistency. 
- if err != nil { - return nil, err - } - v1Config, err := v1ConfigFromConfigJSON(configBytes, v1ID, parentV1ID, imageConfig.History[len(imageConfig.History)-1].EmptyLayer) - if err != nil { - return nil, err - } - history[0].V1Compatibility = string(v1Config) - - if options.LayerInfos != nil { - options.LayerInfos = convertedLayerUpdates - } - m1, err := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture) - if err != nil { - return nil, err // This should never happen, we should have created all the components correctly. - } - return m1, nil -} - -func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string) (string, error) { - if err := blobDigest.Validate(); err != nil { - return "", err - } - parts := append([]string{blobDigest.Encoded()}, others...) - v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " "))) - return hex.EncodeToString(v1IDHash[:]), nil -} - -func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { - // Preserve everything we don't specifically know about. - // (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.) - rawContents := map[string]*json.RawMessage{} - if err := json.Unmarshal(configJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?! - return nil, err - } - delete(rawContents, "rootfs") - delete(rawContents, "history") - - updates := map[string]any{"id": v1ID} - if parentV1ID != "" { - updates["parent"] = parentV1ID - } - if throwaway { - updates["throwaway"] = throwaway - } - for field, value := range updates { - encoded, err := json.Marshal(value) - if err != nil { - return nil, err - } - rawContents[field] = (*json.RawMessage)(&encoded) - } - return json.Marshal(rawContents) -} - -// SupportsEncryption returns if encryption is supported for the manifest type -func (m *manifestSchema2) SupportsEncryption(context.Context) bool { - return false -} - -// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image -// (and the code can handle that). -// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted -// algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts -// to a different manifest format). -func (m *manifestSchema2) CanChangeLayerCompression(mimeType string) bool { - return m.m.CanChangeLayerCompression(mimeType) -} diff --git a/vendor/github.com/containers/image/v5/internal/image/manifest.go b/vendor/github.com/containers/image/v5/internal/image/manifest.go deleted file mode 100644 index ed57e08dd8..0000000000 --- a/vendor/github.com/containers/image/v5/internal/image/manifest.go +++ /dev/null @@ -1,121 +0,0 @@ -package image - -import ( - "context" - "fmt" - - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/types" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" -) - -// genericManifest is an interface for parsing, modifying image manifests and related data. -// The public methods are related to types.Image so that embedding a genericManifest implements most of it, -// but there are also public methods that are only visible by packages that can import c/image/internal/image. 
-type genericManifest interface {
- serialize() ([]byte, error)
- manifestMIMEType() string
- // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
- // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
- ConfigInfo() types.BlobInfo
- // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
- // The result is cached; it is OK to call this however often you need.
- ConfigBlob(context.Context) ([]byte, error)
- // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
- // layers in the resulting configuration isn't guaranteed to be returned due to how
- // old image manifests work (docker v2s1 especially).
- OCIConfig(context.Context) (*imgspecv1.Image, error)
- // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
- // The Digest field is guaranteed to be provided; Size may be -1.
- // WARNING: The list may contain duplicates, and they are semantically relevant.
- LayerInfos() []types.BlobInfo
- // EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
- // It returns false if the manifest does not embed a Docker reference.
- // (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
- EmbeddedDockerReferenceConflicts(ref reference.Named) bool
- // Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
- Inspect(context.Context) (*types.ImageInspectInfo, error)
- // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
- // This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive
- // (most importantly it forces us to download the full layers even if they are already present at the destination).
- UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool
- // UpdatedImage returns a types.Image modified according to options.
- // This does not change the state of the original Image object.
- UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error)
- // SupportsEncryption returns if encryption is supported for the manifest type
- //
- // Deprecated: Initially used to determine if a manifest can be copied from a source manifest type since
- // the process of updating a manifest between different manifest types was to update then convert.
- // This resulted in some fields in the update being lost. This has been fixed by: https://github.com/containers/image/pull/836
- SupportsEncryption(ctx context.Context) bool
-
- // The following methods are not a part of types.Image:
- // ===
-
- // CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
- // (and the code can handle that).
- // NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
- // algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts
- // to a different manifest format.)
- CanChangeLayerCompression(mimeType string) bool -} - -// manifestInstanceFromBlob returns a genericManifest implementation for (manblob, mt) in src. -// If manblob is a manifest list, it implicitly chooses an appropriate image from the list. -func manifestInstanceFromBlob(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte, mt string) (genericManifest, error) { - switch manifest.NormalizedMIMEType(mt) { - case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType: - return manifestSchema1FromManifest(manblob) - case imgspecv1.MediaTypeImageManifest: - return manifestOCI1FromManifest(src, manblob) - case manifest.DockerV2Schema2MediaType: - return manifestSchema2FromManifest(src, manblob) - case manifest.DockerV2ListMediaType: - return manifestSchema2FromManifestList(ctx, sys, src, manblob) - case imgspecv1.MediaTypeImageIndex: - return manifestOCI1FromImageIndex(ctx, sys, src, manblob) - default: // Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values. - return nil, fmt.Errorf("Unimplemented manifest MIME type %q", mt) - } -} - -// manifestLayerInfosToBlobInfos extracts a []types.BlobInfo from a []manifest.LayerInfo. -func manifestLayerInfosToBlobInfos(layers []manifest.LayerInfo) []types.BlobInfo { - blobs := make([]types.BlobInfo, len(layers)) - for i, layer := range layers { - blobs[i] = layer.BlobInfo - } - return blobs -} - -// manifestConvertFn (a method of genericManifest object) returns a genericManifest implementation -// converted to a specific manifest MIME type. -// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned -// value. -// This does not change the state of the original genericManifest object. -type manifestConvertFn func(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) - -// convertManifestIfRequiredWithUpdate will run conversion functions of a manifest if -// required and re-apply the options to the converted type. -// It returns (nil, nil) if no conversion was requested. -func convertManifestIfRequiredWithUpdate(ctx context.Context, options types.ManifestUpdateOptions, converters map[string]manifestConvertFn) (types.Image, error) { - if options.ManifestMIMEType == "" { - return nil, nil - } - - converter, ok := converters[options.ManifestMIMEType] - if !ok { - return nil, fmt.Errorf("Unsupported conversion type: %v", options.ManifestMIMEType) - } - - optionsCopy := options - convertedManifest, err := converter(ctx, &optionsCopy) - if err != nil { - return nil, err - } - convertedImage := memoryImageFromManifest(convertedManifest) - - optionsCopy.ManifestMIMEType = "" - return convertedImage.UpdatedImage(ctx, optionsCopy) -} diff --git a/vendor/github.com/containers/image/v5/internal/image/memory.go b/vendor/github.com/containers/image/v5/internal/image/memory.go deleted file mode 100644 index e22c7aafdf..0000000000 --- a/vendor/github.com/containers/image/v5/internal/image/memory.go +++ /dev/null @@ -1,64 +0,0 @@ -package image - -import ( - "context" - "errors" - - "github.com/containers/image/v5/types" -) - -// memoryImage is a mostly-implementation of types.Image assembled from data -// created in memory, used primarily as a return value of types.Image.UpdatedImage -// as a way to carry various structured information in a type-safe and easy-to-use way. 
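manifestInstanceFromBlob above dispatches on manifest.NormalizedMIMEType; a sketch of the same classification for a raw manifest blob (a hypothetical helper, assuming the go.podman.io/image/v5 manifest package used elsewhere in this change):

    package main

    import (
    	"fmt"

    	"go.podman.io/image/v5/manifest"
    )

    // classify normalizes a possibly-missing MIME type the way
    // manifestInstanceFromBlob above does before choosing a parser:
    // guess from the payload when the transport reported nothing.
    func classify(manblob []byte, mt string) string {
    	if mt == "" {
    		mt = manifest.GuessMIMEType(manblob)
    	}
    	return manifest.NormalizedMIMEType(mt)
    }

    func main() {
    	fmt.Println(classify([]byte(`{"schemaVersion": 2}`), ""))
    }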
-// Note that this _only_ carries the immediate metadata; it is _not_ a stand-alone -// collection of all related information, e.g. there is no way to get layer blobs -// from a memoryImage. -type memoryImage struct { - genericManifest - serializedManifest []byte // A private cache for Manifest() -} - -func memoryImageFromManifest(m genericManifest) types.Image { - return &memoryImage{ - genericManifest: m, - serializedManifest: nil, - } -} - -// Reference returns the reference used to set up this source, _as specified by the user_ -// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. -func (i *memoryImage) Reference() types.ImageReference { - // It would really be inappropriate to return the ImageReference of the image this was based on. - return nil -} - -// Size returns the size of the image as stored, if known, or -1 if not. -func (i *memoryImage) Size() (int64, error) { - return -1, nil -} - -// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. -func (i *memoryImage) Manifest(ctx context.Context) ([]byte, string, error) { - if i.serializedManifest == nil { - m, err := i.genericManifest.serialize() - if err != nil { - return nil, "", err - } - i.serializedManifest = m - } - return i.serializedManifest, i.genericManifest.manifestMIMEType(), nil -} - -// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. -func (i *memoryImage) Signatures(ctx context.Context) ([][]byte, error) { - // Modifying an image invalidates signatures; a caller asking the updated image for signatures - // is probably confused. - return nil, errors.New("Internal error: Image.Signatures() is not supported for images modified in memory") -} - -// LayerInfosForCopy returns an updated set of layer blob information which may not match the manifest. -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (i *memoryImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { - return nil, nil -} diff --git a/vendor/github.com/containers/image/v5/internal/image/oci.go b/vendor/github.com/containers/image/v5/internal/image/oci.go deleted file mode 100644 index aaef95ff3d..0000000000 --- a/vendor/github.com/containers/image/v5/internal/image/oci.go +++ /dev/null @@ -1,336 +0,0 @@ -package image - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "slices" - - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/internal/iolimits" - internalManifest "github.com/containers/image/v5/internal/manifest" - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/pkg/blobinfocache/none" - "github.com/containers/image/v5/types" - ociencspec "github.com/containers/ocicrypt/spec" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" -) - -type manifestOCI1 struct { - src types.ImageSource // May be nil if configBlob is not nil - configBlob []byte // If set, corresponds to contents of m.Config. 
- m *manifest.OCI1
-}
-
-func manifestOCI1FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) {
- m, err := manifest.OCI1FromManifest(manifestBlob)
- if err != nil {
- return nil, err
- }
- return &manifestOCI1{
- src: src,
- m: m,
- }, nil
-}
-
-// manifestOCI1FromComponents builds a new manifestOCI1 from the supplied data:
-func manifestOCI1FromComponents(config imgspecv1.Descriptor, src types.ImageSource, configBlob []byte, layers []imgspecv1.Descriptor) genericManifest {
- return &manifestOCI1{
- src: src,
- configBlob: configBlob,
- m: manifest.OCI1FromComponents(config, layers),
- }
-}
-
-func (m *manifestOCI1) serialize() ([]byte, error) {
- return m.m.Serialize()
-}
-
-func (m *manifestOCI1) manifestMIMEType() string {
- return imgspecv1.MediaTypeImageManifest
-}
-
-// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
-// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
-func (m *manifestOCI1) ConfigInfo() types.BlobInfo {
- return m.m.ConfigInfo()
-}
-
-// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
-// The result is cached; it is OK to call this however often you need.
-func (m *manifestOCI1) ConfigBlob(ctx context.Context) ([]byte, error) {
- if m.configBlob == nil {
- if m.src == nil {
- return nil, errors.New("Internal error: neither src nor configBlob set in manifestOCI1")
- }
- stream, _, err := m.src.GetBlob(ctx, manifest.BlobInfoFromOCI1Descriptor(m.m.Config), none.NoCache)
- if err != nil {
- return nil, err
- }
- defer stream.Close()
- blob, err := iolimits.ReadAtMost(stream, iolimits.MaxConfigBodySize)
- if err != nil {
- return nil, err
- }
- computedDigest := digest.FromBytes(blob)
- if computedDigest != m.m.Config.Digest {
- return nil, fmt.Errorf("Downloaded config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest)
- }
- m.configBlob = blob
- }
- return m.configBlob, nil
-}
-
-// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
-// layers in the resulting configuration isn't guaranteed to be returned due to how
-// old image manifests work (docker v2s1 especially).
-func (m *manifestOCI1) OCIConfig(ctx context.Context) (*imgspecv1.Image, error) {
- if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
- return nil, internalManifest.NewNonImageArtifactError(&m.m.Manifest)
- }
-
- cb, err := m.ConfigBlob(ctx)
- if err != nil {
- return nil, err
- }
- configOCI := &imgspecv1.Image{}
- if err := json.Unmarshal(cb, configOCI); err != nil {
- return nil, err
- }
- return configOCI, nil
-}
-
-// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (m *manifestOCI1) LayerInfos() []types.BlobInfo {
- return manifestLayerInfosToBlobInfos(m.m.LayerInfos())
-}
-
-// EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
-// It returns false if the manifest does not embed a Docker reference.
-// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
-func (m *manifestOCI1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
- return false
-}
-
-// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
-func (m *manifestOCI1) Inspect(ctx context.Context) (*types.ImageInspectInfo, error) {
- getter := func(info types.BlobInfo) ([]byte, error) {
- if info.Digest != m.ConfigInfo().Digest {
- // Shouldn't ever happen
- return nil, errors.New("asked for a different config blob")
- }
- config, err := m.ConfigBlob(ctx)
- if err != nil {
- return nil, err
- }
- return config, nil
- }
- return m.m.Inspect(getter)
-}
-
-// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
-// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive
-// (most importantly it forces us to download the full layers even if they are already present at the destination).
-func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
- return false
-}
-
-// UpdatedImage returns a types.Image modified according to options.
-// This does not change the state of the original Image object.
-// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError
-// if the combination of CompressionOperation and CompressionAlgorithm specified
-// in one or more options.LayerInfos items indicates that a layer is compressed using
-// an algorithm that is not allowed in OCI.
-func (m *manifestOCI1) UpdatedImage(ctx context.Context, options types.ManifestUpdateOptions) (types.Image, error) {
- copy := manifestOCI1{ // NOTE: This is not a deep copy, it still shares slices etc.
- src: m.src,
- configBlob: m.configBlob,
- m: manifest.OCI1Clone(m.m),
- }
-
- converted, err := convertManifestIfRequiredWithUpdate(ctx, options, map[string]manifestConvertFn{
- manifest.DockerV2Schema2MediaType: copy.convertToManifestSchema2Generic,
- manifest.DockerV2Schema1MediaType: copy.convertToManifestSchema1,
- manifest.DockerV2Schema1SignedMediaType: copy.convertToManifestSchema1,
- })
- if err != nil {
- return nil, err
- }
-
- if converted != nil {
- return converted, nil
- }
-
- // No conversion required, update manifest
- if options.LayerInfos != nil {
- if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil {
- return nil, err
- }
- }
- // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1, but we really don't care.
-
- return memoryImageFromManifest(&copy), nil
-}
-
-func schema2DescriptorFromOCI1Descriptor(d imgspecv1.Descriptor) manifest.Schema2Descriptor {
- return manifest.Schema2Descriptor{
- MediaType: d.MediaType,
- Size: d.Size,
- Digest: d.Digest,
- URLs: d.URLs,
- }
-}
-
-// convertToManifestSchema2Generic returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType.
-// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned
-// value.
-// This does not change the state of the original manifestOCI1 object.
-// -// We need this function just because a function returning an implementation of the genericManifest -// interface is not automatically assignable to a function type returning the genericManifest interface -func (m *manifestOCI1) convertToManifestSchema2Generic(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) { - return m.convertToManifestSchema2(ctx, options) -} - -// layerEditsOfOCIOnlyFeatures checks if options requires some layer edits to be done before converting to a Docker format. -// If not, it returns (nil, nil). -// If decryption is required, it returns a set of edits to provide to OCI1.UpdateLayerInfos, -// and edits *options to not try decryption again. -func (m *manifestOCI1) layerEditsOfOCIOnlyFeatures(options *types.ManifestUpdateOptions) ([]types.BlobInfo, error) { - if options == nil || options.LayerInfos == nil { - return nil, nil - } - - originalInfos := m.LayerInfos() - if len(originalInfos) != len(options.LayerInfos) { - return nil, fmt.Errorf("preparing to decrypt before conversion: %d layers vs. %d layer edits", len(originalInfos), len(options.LayerInfos)) - } - - ociOnlyEdits := slices.Clone(originalInfos) // Start with a full copy so that we don't forget to copy anything: use the current data in full unless we intentionally deviate. - laterEdits := slices.Clone(options.LayerInfos) - needsOCIOnlyEdits := false - for i, edit := range options.LayerInfos { - // Unless determined otherwise, don't do any compression-related MIME type conversions. m.LayerInfos() should not set these edit instructions, but be explicit. - ociOnlyEdits[i].CompressionOperation = types.PreserveOriginal - ociOnlyEdits[i].CompressionAlgorithm = nil - - if edit.CryptoOperation == types.Decrypt { - needsOCIOnlyEdits = true // Encrypted types must be removed before conversion because they can’t be represented in Docker schemas - ociOnlyEdits[i].CryptoOperation = types.Decrypt - laterEdits[i].CryptoOperation = types.PreserveOriginalCrypto // Don't try to decrypt in a schema[12] manifest later, that would fail. - } - - if originalInfos[i].MediaType == imgspecv1.MediaTypeImageLayerZstd || - originalInfos[i].MediaType == imgspecv1.MediaTypeImageLayerNonDistributableZstd { //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. - needsOCIOnlyEdits = true // Zstd MIME types must be removed before conversion because they can’t be represented in Docker schemas. - ociOnlyEdits[i].CompressionOperation = edit.CompressionOperation - ociOnlyEdits[i].CompressionAlgorithm = edit.CompressionAlgorithm - laterEdits[i].CompressionOperation = types.PreserveOriginal - laterEdits[i].CompressionAlgorithm = nil - } - } - if !needsOCIOnlyEdits { - return nil, nil - } - - options.LayerInfos = laterEdits - return ociOnlyEdits, nil -} - -// convertToManifestSchema2 returns a genericManifest implementation converted to manifest.DockerV2Schema2MediaType. -// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned -// value. -// This does not change the state of the original manifestOCI1 object. -func (m *manifestOCI1) convertToManifestSchema2(_ context.Context, options *types.ManifestUpdateOptions) (*manifestSchema2, error) { - if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig { - return nil, internalManifest.NewNonImageArtifactError(&m.m.Manifest) - } - - // Mostly we first make a format conversion, and _afterwards_ do layer edits. 
But first we need to do the layer edits - // which remove OCI-specific features, because trying to convert those layers would fail. - // So, do the layer updates for decryption, and for conversions from Zstd. - ociManifest := m.m - ociOnlyEdits, err := m.layerEditsOfOCIOnlyFeatures(options) - if err != nil { - return nil, err - } - if ociOnlyEdits != nil { - ociManifest = manifest.OCI1Clone(ociManifest) - if err := ociManifest.UpdateLayerInfos(ociOnlyEdits); err != nil { - return nil, err - } - } - - // Create a copy of the descriptor. - config := schema2DescriptorFromOCI1Descriptor(ociManifest.Config) - - // Above, we have already checked that this manifest refers to an image, not an OCI artifact, - // so the only difference between OCI and DockerSchema2 is the mediatypes. The - // media type of the manifest is handled by manifestSchema2FromComponents. - config.MediaType = manifest.DockerV2Schema2ConfigMediaType - - layers := make([]manifest.Schema2Descriptor, len(ociManifest.Layers)) - for idx := range layers { - layers[idx] = schema2DescriptorFromOCI1Descriptor(ociManifest.Layers[idx]) - switch layers[idx].MediaType { - case imgspecv1.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. - layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaType - case imgspecv1.MediaTypeImageLayerNonDistributableGzip: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. - layers[idx].MediaType = manifest.DockerV2Schema2ForeignLayerMediaTypeGzip - case imgspecv1.MediaTypeImageLayerNonDistributableZstd: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. - return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType) - case imgspecv1.MediaTypeImageLayer: - layers[idx].MediaType = manifest.DockerV2SchemaLayerMediaTypeUncompressed - case imgspecv1.MediaTypeImageLayerGzip: - layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType - case imgspecv1.MediaTypeImageLayerZstd: - return nil, fmt.Errorf("Error during manifest conversion: %q: zstd compression is not supported for docker images", layers[idx].MediaType) - case ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc, ociencspec.MediaTypeLayerZstdEnc, - ociencspec.MediaTypeLayerNonDistributableEnc, ociencspec.MediaTypeLayerNonDistributableGzipEnc, ociencspec.MediaTypeLayerNonDistributableZstdEnc: - return nil, fmt.Errorf("during manifest conversion: encrypted layers (%q) are not supported in docker images", layers[idx].MediaType) - default: - return nil, fmt.Errorf("Unknown media type during manifest conversion: %q", layers[idx].MediaType) - } - } - - // Rather than copying the ConfigBlob now, we just pass m.src to the - // translated manifest, since the only difference is the mediatype of - // descriptors there is no change to any blob stored in m.src. - return manifestSchema2FromComponents(config, m.src, nil, layers), nil -} - -// convertToManifestSchema1 returns a genericManifest implementation converted to manifest.DockerV2Schema1{Signed,}MediaType. -// It may use options.InformationOnly and also adjust *options to be appropriate for editing the returned -// value. -// This does not change the state of the original manifestOCI1 object. 
-func (m *manifestOCI1) convertToManifestSchema1(ctx context.Context, options *types.ManifestUpdateOptions) (genericManifest, error) {
- if m.m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
- return nil, internalManifest.NewNonImageArtifactError(&m.m.Manifest)
- }
-
- // We can't directly convert images to V1, but we can transitively convert via a V2 image
- m2, err := m.convertToManifestSchema2(ctx, options)
- if err != nil {
- return nil, err
- }
-
- return m2.convertToManifestSchema1(ctx, options)
-}
-
-// SupportsEncryption returns if encryption is supported for the manifest type
-func (m *manifestOCI1) SupportsEncryption(context.Context) bool {
- return true
-}
-
-// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image
-// (and the code can handle that).
-// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted
-// algorithms depends not on the current format, but possibly on the target of a conversion (if UpdatedImage converts
-// to a different manifest format).
-func (m *manifestOCI1) CanChangeLayerCompression(mimeType string) bool {
- return m.m.CanChangeLayerCompression(mimeType)
-}
diff --git a/vendor/github.com/containers/image/v5/internal/image/oci_index.go b/vendor/github.com/containers/image/v5/internal/image/oci_index.go
deleted file mode 100644
index 0e945c8519..0000000000
--- a/vendor/github.com/containers/image/v5/internal/image/oci_index.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package image
-
-import (
- "context"
- "fmt"
-
- "github.com/containers/image/v5/internal/manifest"
- "github.com/containers/image/v5/types"
-)
-
-func manifestOCI1FromImageIndex(ctx context.Context, sys *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) {
- index, err := manifest.OCI1IndexFromManifest(manblob)
- if err != nil {
- return nil, fmt.Errorf("parsing OCI1 index: %w", err)
- }
- targetManifestDigest, err := index.ChooseInstance(sys)
- if err != nil {
- return nil, fmt.Errorf("choosing image instance: %w", err)
- }
- manblob, mt, err := src.GetManifest(ctx, &targetManifestDigest)
- if err != nil {
- return nil, fmt.Errorf("fetching target platform image selected from image index: %w", err)
- }
-
- matches, err := manifest.MatchesDigest(manblob, targetManifestDigest)
- if err != nil {
- return nil, fmt.Errorf("computing manifest digest: %w", err)
- }
- if !matches {
- return nil, fmt.Errorf("Image manifest does not match selected manifest digest %s", targetManifestDigest)
- }
-
- return manifestInstanceFromBlob(ctx, sys, src, manblob, mt)
-}
diff --git a/vendor/github.com/containers/image/v5/internal/image/sourced.go b/vendor/github.com/containers/image/v5/internal/image/sourced.go
deleted file mode 100644
index 661891aa55..0000000000
--- a/vendor/github.com/containers/image/v5/internal/image/sourced.go
+++ /dev/null
@@ -1,134 +0,0 @@
-// Package image consolidates knowledge about various container image formats
-// (as opposed to image storage mechanisms, which are handled by types.ImageSource)
-// and exposes all of them using a unified interface.
-package image
-
-import (
- "context"
-
- "github.com/containers/image/v5/types"
-)
-
-// FromReference returns a types.ImageCloser implementation for the default instance reading from reference.
-// If reference points to a manifest list, .Manifest() still returns the manifest list,
-// but other methods transparently return data from an appropriate image instance.
-//
-// The caller must call .Close() on the returned ImageCloser.
-//
-// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
-// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function.
-func FromReference(ctx context.Context, sys *types.SystemContext, ref types.ImageReference) (types.ImageCloser, error) {
- src, err := ref.NewImageSource(ctx, sys)
- if err != nil {
- return nil, err
- }
- img, err := FromSource(ctx, sys, src)
- if err != nil {
- src.Close()
- return nil, err
- }
- return img, nil
-}
-
-// imageCloser implements types.ImageCloser, perhaps allowing simple users
-// to use a single object without having to keep a reference to a types.ImageSource
-// only to call types.ImageSource.Close().
-type imageCloser struct {
- types.Image
- src types.ImageSource
-}
-
-// FromSource returns a types.ImageCloser implementation for the default instance of source.
-// If source is a manifest list, .Manifest() still returns the manifest list,
-// but other methods transparently return data from an appropriate image instance.
-//
-// The caller must call .Close() on the returned ImageCloser.
-//
-// FromSource “takes ownership” of the input ImageSource and will call src.Close()
-// when the image is closed. (This does not prevent callers from using both the
-// Image and ImageSource objects simultaneously, but it means that they only need to
-// close the Image.)
-//
-// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
-// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function.
-//
-// Most callers can use either FromUnparsedImage or FromReference instead.
-//
-// This is publicly visible as c/image/image.FromSource.
-func FromSource(ctx context.Context, sys *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) {
- img, err := FromUnparsedImage(ctx, sys, UnparsedInstance(src, nil))
- if err != nil {
- return nil, err
- }
- return &imageCloser{
- Image: img,
- src: src,
- }, nil
-}
-
-func (ic *imageCloser) Close() error {
- return ic.src.Close()
-}
-
-// SourcedImage is a general set of utilities for working with container images,
-// whatever their underlying transport is (i.e. ImageSource-independent).
-// Note the existence of docker.Image and image.memoryImage: various instances
-// of a types.Image may not be a SourcedImage directly.
-//
-// Most external users of `types.Image` do not care, and those who care about `docker.Image` know they do.
-//
-// Internal users may depend on methods available in SourcedImage but not (yet?) in types.Image.
-type SourcedImage struct {
- *UnparsedImage
- ManifestBlob []byte // The manifest of the relevant instance
- ManifestMIMEType string // MIME type of ManifestBlob
- // genericManifest contains data corresponding to manifestBlob.
- // NOTE: The manifest may have been modified in the process; DO NOT reserialize and store genericManifest
- // if you want to preserve the original manifest; use manifestBlob directly.
- genericManifest
-}
-
-// FromUnparsedImage returns a types.Image implementation for unparsed.
-// If unparsed represents a manifest list, .Manifest() still returns the manifest list,
-// but other methods transparently return data from an appropriate single image.
-//
-// The Image must not be used after the underlying ImageSource is Close()d.
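FromSource's ownership rule above means the caller closes only the returned ImageCloser; the source is closed by the caller only when FromSource itself fails. A sketch of that flow (a hypothetical helper; assumes the go.podman.io/image/v5 module path and a transport-prefixed image name):

    package main

    import (
    	"context"

    	"go.podman.io/image/v5/image"
    	"go.podman.io/image/v5/transports/alltransports"
    	"go.podman.io/image/v5/types"
    )

    // fetchManifest resolves a name such as "docker://busybox:latest" and returns
    // its manifest, following the Close() ownership rules documented above.
    func fetchManifest(ctx context.Context, sys *types.SystemContext, name string) ([]byte, string, error) {
    	ref, err := alltransports.ParseImageName(name)
    	if err != nil {
    		return nil, "", err
    	}
    	src, err := ref.NewImageSource(ctx, sys)
    	if err != nil {
    		return nil, "", err
    	}
    	img, err := image.FromSource(ctx, sys, src)
    	if err != nil {
    		src.Close() // ownership was not transferred; close the source ourselves
    		return nil, "", err
    	}
    	defer img.Close() // closes the underlying ImageSource as well
    	return img.Manifest(ctx)
    }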
-// -// This is publicly visible as c/image/image.FromUnparsedImage. -func FromUnparsedImage(ctx context.Context, sys *types.SystemContext, unparsed *UnparsedImage) (*SourcedImage, error) { - // Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage: - // we want to be able to use unparsed.src. We could make that an explicit interface, but, well, - // this is the only UnparsedImage implementation around, anyway. - - // NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest(). - manifestBlob, manifestMIMEType, err := unparsed.Manifest(ctx) - if err != nil { - return nil, err - } - - parsedManifest, err := manifestInstanceFromBlob(ctx, sys, unparsed.src, manifestBlob, manifestMIMEType) - if err != nil { - return nil, err - } - - return &SourcedImage{ - UnparsedImage: unparsed, - ManifestBlob: manifestBlob, - ManifestMIMEType: manifestMIMEType, - genericManifest: parsedManifest, - }, nil -} - -// Size returns the size of the image as stored, if it's known, or -1 if it isn't. -func (i *SourcedImage) Size() (int64, error) { - return -1, nil -} - -// Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched. -func (i *SourcedImage) Manifest(ctx context.Context) ([]byte, string, error) { - return i.ManifestBlob, i.ManifestMIMEType, nil -} - -func (i *SourcedImage) LayerInfosForCopy(ctx context.Context) ([]types.BlobInfo, error) { - return i.UnparsedImage.src.LayerInfosForCopy(ctx, i.UnparsedImage.instanceDigest) -} diff --git a/vendor/github.com/containers/image/v5/internal/image/unparsed.go b/vendor/github.com/containers/image/v5/internal/image/unparsed.go deleted file mode 100644 index 1cffe4311b..0000000000 --- a/vendor/github.com/containers/image/v5/internal/image/unparsed.go +++ /dev/null @@ -1,125 +0,0 @@ -package image - -import ( - "context" - "fmt" - - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/internal/imagesource" - "github.com/containers/image/v5/internal/private" - "github.com/containers/image/v5/internal/signature" - "github.com/containers/image/v5/manifest" - "github.com/containers/image/v5/types" - "github.com/opencontainers/go-digest" -) - -// UnparsedImage implements types.UnparsedImage . -// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance. -// -// This is publicly visible as c/image/image.UnparsedImage. -type UnparsedImage struct { - src private.ImageSource - instanceDigest *digest.Digest - cachedManifest []byte // A private cache for Manifest(); nil if not yet known. - // A private cache for Manifest(), may be the empty string if guessing failed. - // Valid iff cachedManifest is not nil. - cachedManifestMIMEType string - cachedSignatures []signature.Signature // A private cache for Signatures(); nil if not yet known. -} - -// UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest). -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list). -// -// This implementation of [types.UnparsedImage] ensures that [types.UnparsedImage.Manifest] validates the image -// against instanceDigest if set, or, if not, a digest implied by src.Reference, if any. -// -// The UnparsedImage must not be used after the underlying ImageSource is Close()d. 
-//
-// This is publicly visible as c/image/image.UnparsedInstance.
-func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage {
- return &UnparsedImage{
- src: imagesource.FromPublic(src),
- instanceDigest: instanceDigest,
- }
-}
-
-// Reference returns the reference used to set up this source, _as specified by the user_
-// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
-func (i *UnparsedImage) Reference() types.ImageReference {
- // Note that this does not depend on instanceDigest; e.g. all instances within a manifest list need to be signed with the manifest list identity.
- return i.src.Reference()
-}
-
-// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
-//
-// Users of UnparsedImage are promised that this validates the image
-// against either i.instanceDigest if set, or against a digest included in i.src.Reference.
-func (i *UnparsedImage) Manifest(ctx context.Context) ([]byte, string, error) {
- if i.cachedManifest == nil {
- m, mt, err := i.src.GetManifest(ctx, i.instanceDigest)
- if err != nil {
- return nil, "", err
- }
-
- // ImageSource.GetManifest does not do digest verification, but we do;
- // this immediately protects also any user of types.Image.
- if digest, haveDigest := i.expectedManifestDigest(); haveDigest {
- matches, err := manifest.MatchesDigest(m, digest)
- if err != nil {
- return nil, "", fmt.Errorf("computing manifest digest: %w", err)
- }
- if !matches {
- return nil, "", fmt.Errorf("Manifest does not match provided manifest digest %s", digest)
- }
- }
-
- i.cachedManifest = m
- i.cachedManifestMIMEType = mt
- }
- return i.cachedManifest, i.cachedManifestMIMEType, nil
-}
-
-// expectedManifestDigest returns the expected value of the manifest digest, and an indicator of whether it is known.
-// The bool return value seems redundant with digest != ""; it is used explicitly
-// to refuse (unexpected) situations when the digest exists but is "".
-func (i *UnparsedImage) expectedManifestDigest() (digest.Digest, bool) {
- if i.instanceDigest != nil {
- return *i.instanceDigest, true
- }
- ref := i.Reference().DockerReference()
- if ref != nil {
- if canonical, ok := ref.(reference.Canonical); ok {
- return canonical.Digest(), true
- }
- }
- return "", false
-}
-
-// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
-func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) {
- // It would be consistent to make this an internal/unparsedimage/impl.Compat wrapper,
- // but this is very likely to be the only implementation ever.
- sigs, err := i.UntrustedSignatures(ctx)
- if err != nil {
- return nil, err
- }
- simpleSigs := [][]byte{}
- for _, sig := range sigs {
- if sig, ok := sig.(signature.SimpleSigning); ok {
- simpleSigs = append(simpleSigs, sig.UntrustedSignature())
- }
- }
- return simpleSigs, nil
-}
-
-// UntrustedSignatures is like ImageSource.GetSignaturesWithFormat, but the result is cached; it is OK to call this however often you need.
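The digest pinning used by expectedManifestDigest above comes from references of the name@digest form, which parse as reference.Canonical. A standalone sketch (assuming only the docker/reference package under the new module path; the digest value is a placeholder):

    package main

    import (
    	"fmt"

    	"go.podman.io/image/v5/docker/reference"
    )

    func main() {
    	// A name@digest reference parses as reference.Canonical, which is what
    	// expectedManifestDigest above relies on to pin the manifest digest.
    	ref, err := reference.ParseNormalizedNamed("docker.io/library/busybox@sha256:9172c5f692f2c65d0f3f8a7b0be271f3e5dcff6dcf6b64b3b0f3f650e546f2e5")
    	if err != nil {
    		panic(err)
    	}
    	if canonical, ok := ref.(reference.Canonical); ok {
    		fmt.Println("pinned digest:", canonical.Digest())
    	} else {
    		fmt.Println("tag-only reference; nothing to verify the manifest against")
    	}
    }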
-func (i *UnparsedImage) UntrustedSignatures(ctx context.Context) ([]signature.Signature, error) {
- if i.cachedSignatures == nil {
- sigs, err := i.src.GetSignaturesWithFormat(ctx, i.instanceDigest)
- if err != nil {
- return nil, err
- }
- i.cachedSignatures = sigs
- }
- return i.cachedSignatures, nil
-}
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go
deleted file mode 100644
index 70b207d9b5..0000000000
--- a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/compat.go
+++ /dev/null
@@ -1,114 +0,0 @@
-package impl
-
-import (
- "context"
- "io"
-
- "github.com/containers/image/v5/internal/blobinfocache"
- "github.com/containers/image/v5/internal/private"
- "github.com/containers/image/v5/internal/signature"
- "github.com/containers/image/v5/types"
- "github.com/opencontainers/go-digest"
-)
-
-// Compat implements the obsolete parts of types.ImageDestination
-// for implementations of private.ImageDestination.
-// See AddCompat below.
-type Compat struct {
- dest private.ImageDestinationInternalOnly
-}
-
-// AddCompat initializes Compat to implement the obsolete parts of types.ImageDestination
-// for implementations of private.ImageDestination.
-//
-// Use it like this:
-//
-// type yourDestination struct {
-// impl.Compat
-// …
-// }
-//
-// dest := &yourDestination{…}
-// dest.Compat = impl.AddCompat(dest)
-func AddCompat(dest private.ImageDestinationInternalOnly) Compat {
- return Compat{dest}
-}
-
-// PutBlob writes contents of stream and returns data representing the result.
-// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
-// inputInfo.Size is the expected length of stream, if known.
-// inputInfo.MediaType describes the blob format, if known.
-// May update cache.
-// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
-// to any other readers for download using the supplied digest.
-// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-func (c *Compat) PutBlob(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, cache types.BlobInfoCache, isConfig bool) (types.BlobInfo, error) {
- res, err := c.dest.PutBlobWithOptions(ctx, stream, inputInfo, private.PutBlobOptions{
- Cache: blobinfocache.FromBlobInfoCache(cache),
- IsConfig: isConfig,
- })
- if err != nil {
- return types.BlobInfo{}, err
- }
- return types.BlobInfo{
- Digest: res.Digest,
- Size: res.Size,
- }, nil
-}
-
-// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
-// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
-// info.Digest must not be empty.
-// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
-// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
-// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
-// reflected in the manifest that will be written.
-// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. -// May use and/or update cache. -func (c *Compat) TryReusingBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache, canSubstitute bool) (bool, types.BlobInfo, error) { - reused, blob, err := c.dest.TryReusingBlobWithOptions(ctx, info, private.TryReusingBlobOptions{ - Cache: blobinfocache.FromBlobInfoCache(cache), - CanSubstitute: canSubstitute, - }) - if !reused || err != nil { - return reused, types.BlobInfo{}, err - } - res := types.BlobInfo{ - Digest: blob.Digest, - Size: blob.Size, - CompressionOperation: blob.CompressionOperation, - CompressionAlgorithm: blob.CompressionAlgorithm, - } - // This is probably not necessary; we preserve MediaType to decrease risks of breaking for external callers. - // Some transports were not setting the MediaType field anyway, and others were setting the old value on substitution; - // provide the value in cases where it is likely to be correct. - if blob.Digest == info.Digest { - res.MediaType = info.MediaType - } - return true, res, nil -} - -// PutSignatures writes a set of signatures to the destination. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for -// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. -// MUST be called after PutManifest (signatures may reference manifest contents). -func (c *Compat) PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error { - withFormat := []signature.Signature{} - for _, sig := range signatures { - withFormat = append(withFormat, signature.SimpleSigningFromBlob(sig)) - } - return c.dest.PutSignaturesWithFormat(ctx, withFormat, instanceDigest) -} - -// Commit marks the process of storing the image as successful and asks for the image to be persisted. -// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list -// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the -// original manifest list digest, if desired. -// WARNING: This does not have any transactional semantics: -// - Uploaded data MAY be visible to others before Commit() is called -// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed) -func (c *Compat) Commit(ctx context.Context, unparsedToplevel types.UnparsedImage) error { - return c.dest.CommitWithOptions(ctx, private.CommitOptions{ - UnparsedToplevel: unparsedToplevel, - }) -} diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go deleted file mode 100644 index 9b42cfbec0..0000000000 --- a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/helpers.go +++ /dev/null @@ -1,15 +0,0 @@ -package impl - -import ( - "github.com/containers/image/v5/internal/manifest" - "github.com/containers/image/v5/internal/private" -) - -// OriginalCandidateMatchesTryReusingBlobOptions returns true if the original blob passed to TryReusingBlobWithOptions -// is acceptable based on opts. 
-func OriginalCandidateMatchesTryReusingBlobOptions(opts private.TryReusingBlobOptions) bool {
-	return manifest.CandidateCompressionMatchesReuseConditions(manifest.ReuseConditions{
-		PossibleManifestFormats: opts.PossibleManifestFormats,
-		RequiredCompression:     opts.RequiredCompression,
-	}, opts.OriginalCompression)
-}
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/properties.go b/vendor/github.com/containers/image/v5/internal/imagedestination/impl/properties.go
deleted file mode 100644
index 704812e9ab..0000000000
--- a/vendor/github.com/containers/image/v5/internal/imagedestination/impl/properties.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package impl
-
-import "github.com/containers/image/v5/types"
-
-// Properties collects properties of an ImageDestination that are constant throughout its lifetime
-// (but might differ across instances).
-type Properties struct {
-	// SupportedManifestMIMETypes tells which manifest MIME types the destination supports.
-	// An empty slice or nil means any MIME type can be tried for upload.
-	SupportedManifestMIMETypes []string
-	// DesiredLayerCompression indicates the kind of compression to apply on layers
-	DesiredLayerCompression types.LayerCompression
-	// AcceptsForeignLayerURLs is false if foreign layers in the manifest should actually be
-	// uploaded to the image destination, true otherwise.
-	AcceptsForeignLayerURLs bool
-	// MustMatchRuntimeOS is set to true if the destination can store only images targeted for the current runtime architecture and OS.
-	MustMatchRuntimeOS bool
-	// IgnoresEmbeddedDockerReference is set to true if the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
-	// and would prefer to receive an unmodified manifest instead of one modified for the destination.
-	// Does not make a difference if Reference().DockerReference() is nil.
-	IgnoresEmbeddedDockerReference bool
-	// HasThreadSafePutBlob indicates that PutBlob can be executed concurrently.
-	HasThreadSafePutBlob bool
-}
-
-// PropertyMethodsInitialize implements parts of private.ImageDestination corresponding to Properties.
-type PropertyMethodsInitialize struct {
-	// We need two separate structs, PropertyMethodsInitialize and Properties, because Go prohibits fields and methods with the same name.
-
-	vals Properties
-}
-
-// PropertyMethods creates a PropertyMethodsInitialize for vals.
-func PropertyMethods(vals Properties) PropertyMethodsInitialize {
-	return PropertyMethodsInitialize{
-		vals: vals,
-	}
-}
-
-// SupportedManifestMIMETypes tells which manifest MIME types the destination supports.
-// If an empty slice or nil is returned, then any MIME type can be tried for upload.
-func (o PropertyMethodsInitialize) SupportedManifestMIMETypes() []string {
-	return o.vals.SupportedManifestMIMETypes
-}
-
-// DesiredLayerCompression indicates the kind of compression to apply on layers
-func (o PropertyMethodsInitialize) DesiredLayerCompression() types.LayerCompression {
-	return o.vals.DesiredLayerCompression
-}
-
-// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be
-// uploaded to the image destination, true otherwise.
-func (o PropertyMethodsInitialize) AcceptsForeignLayerURLs() bool {
-	return o.vals.AcceptsForeignLayerURLs
-}
-
-// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
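[review note, not part of the patch: the Properties/PropertyMethods split above exists only because Go forbids a field and a method sharing a name. A sketch of how a transport inside c/image wires it up, following the constructor pattern this package documents elsewhere; yourDestination is hypothetical:

	type yourDestination struct {
		impl.PropertyMethodsInitialize
		// …
	}

	dest := &yourDestination{
		PropertyMethodsInitialize: impl.PropertyMethods(impl.Properties{
			SupportedManifestMIMETypes: nil, // nil/empty: any MIME type may be tried
			HasThreadSafePutBlob:       true,
		}),
	}

end of note]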
-func (o PropertyMethodsInitialize) MustMatchRuntimeOS() bool { - return o.vals.MustMatchRuntimeOS -} - -// IgnoresEmbeddedDockerReference() returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(), -// and would prefer to receive an unmodified manifest instead of one modified for the destination. -// Does not make a difference if Reference().DockerReference() is nil. -func (o PropertyMethodsInitialize) IgnoresEmbeddedDockerReference() bool { - return o.vals.IgnoresEmbeddedDockerReference -} - -// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently. -func (o PropertyMethodsInitialize) HasThreadSafePutBlob() bool { - return o.vals.HasThreadSafePutBlob -} diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/original_oci_config.go b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/original_oci_config.go deleted file mode 100644 index c4536e933b..0000000000 --- a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/original_oci_config.go +++ /dev/null @@ -1,16 +0,0 @@ -package stubs - -import ( - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" -) - -// IgnoresOriginalOCIConfig implements NoteOriginalOCIConfig() that does nothing. -type IgnoresOriginalOCIConfig struct{} - -// NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format, -// or an error obtaining that value (e.g. if the image is an artifact and not a container image). -// The destination can use it in its TryReusingBlob/PutBlob implementations -// (otherwise it only obtains the final config after all layers are written). -func (stub IgnoresOriginalOCIConfig) NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error { - return nil -} diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go deleted file mode 100644 index 22bed4b0fa..0000000000 --- a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/put_blob_partial.go +++ /dev/null @@ -1,52 +0,0 @@ -package stubs - -import ( - "context" - "fmt" - - "github.com/containers/image/v5/internal/private" - "github.com/containers/image/v5/types" -) - -// NoPutBlobPartialInitialize implements parts of private.ImageDestination -// for transports that don’t support PutBlobPartial(). -// See NoPutBlobPartial() below. -type NoPutBlobPartialInitialize struct { - transportName string -} - -// NoPutBlobPartial creates a NoPutBlobPartialInitialize for ref. -func NoPutBlobPartial(ref types.ImageReference) NoPutBlobPartialInitialize { - return NoPutBlobPartialRaw(ref.Transport().Name()) -} - -// NoPutBlobPartialRaw is the same thing as NoPutBlobPartial, but it can be used -// in situations where no ImageReference is available. -func NoPutBlobPartialRaw(transportName string) NoPutBlobPartialInitialize { - return NoPutBlobPartialInitialize{ - transportName: transportName, - } -} - -// SupportsPutBlobPartial returns true if PutBlobPartial is supported. -func (stub NoPutBlobPartialInitialize) SupportsPutBlobPartial() bool { - return false -} - -// PutBlobPartial attempts to create a blob using the data that is already present -// at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks. -// It is available only if SupportsPutBlobPartial(). 
-// Even if SupportsPutBlobPartial() returns true, the call can fail.
-// If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions.
-// The fallback _must not_ be done otherwise.
-func (stub NoPutBlobPartialInitialize) PutBlobPartial(ctx context.Context, chunkAccessor private.BlobChunkAccessor, srcInfo types.BlobInfo, options private.PutBlobPartialOptions) (private.UploadedBlob, error) {
-	return private.UploadedBlob{}, fmt.Errorf("internal error: PutBlobPartial is not supported by the %q transport", stub.transportName)
-}
-
-// ImplementsPutBlobPartial implements SupportsPutBlobPartial() that returns true.
-type ImplementsPutBlobPartial struct{}
-
-// SupportsPutBlobPartial returns true if PutBlobPartial is supported.
-func (stub ImplementsPutBlobPartial) SupportsPutBlobPartial() bool {
-	return true
-}
diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/signatures.go b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/signatures.go
deleted file mode 100644
index 7015fd0689..0000000000
--- a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/signatures.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package stubs
-
-import (
-	"context"
-	"errors"
-
-	"github.com/containers/image/v5/internal/signature"
-	"github.com/opencontainers/go-digest"
-)
-
-// NoSignaturesInitialize implements parts of private.ImageDestination
-// for transports that don’t support storing signatures.
-// See NoSignatures() below.
-type NoSignaturesInitialize struct {
-	message string
-}
-
-// NoSignatures creates a NoSignaturesInitialize, failing with message.
-func NoSignatures(message string) NoSignaturesInitialize {
-	return NoSignaturesInitialize{
-		message: message,
-	}
-}
-
-// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
-// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
-func (stub NoSignaturesInitialize) SupportsSignatures(ctx context.Context) error {
-	return errors.New(stub.message)
-}
-
-// PutSignaturesWithFormat writes a set of signatures to the destination.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
-// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
-// MUST be called after PutManifest (signatures may reference manifest contents).
-func (stub NoSignaturesInitialize) PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error {
-	if len(signatures) != 0 {
-		return errors.New(stub.message)
-	}
-	return nil
-}
-
-// AlwaysSupportsSignatures implements SupportsSignatures() that returns nil.
-// Note that it might be even more useful to return a value dynamically detected based on
-type AlwaysSupportsSignatures struct{}
-
-// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
-// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
-func (stub AlwaysSupportsSignatures) SupportsSignatures(ctx context.Context) error { - return nil -} diff --git a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/stubs.go b/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/stubs.go deleted file mode 100644 index ab233406a4..0000000000 --- a/vendor/github.com/containers/image/v5/internal/imagedestination/stubs/stubs.go +++ /dev/null @@ -1,27 +0,0 @@ -// Package stubs contains trivial stubs for parts of private.ImageDestination. -// It can be used from internal/wrapper, so it should not drag in any extra dependencies. -// Compare with imagedestination/impl, which might require non-trivial implementation work. -// -// There are two kinds of stubs: -// -// First, there are pure stubs, like ImplementsPutBlobPartial. Those can just be included in an imageDestination -// implementation: -// -// type yourDestination struct { -// stubs.ImplementsPutBlobPartial -// … -// } -// -// Second, there are stubs with a constructor, like NoPutBlobPartialInitialize. The Initialize marker -// means that a constructor must be called: -// -// type yourDestination struct { -// stubs.NoPutBlobPartialInitialize -// … -// } -// -// dest := &yourDestination{ -// … -// NoPutBlobPartialInitialize: stubs.NoPutBlobPartial(ref), -// } -package stubs diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/impl/compat.go b/vendor/github.com/containers/image/v5/internal/imagesource/impl/compat.go deleted file mode 100644 index 7d859c3125..0000000000 --- a/vendor/github.com/containers/image/v5/internal/imagesource/impl/compat.go +++ /dev/null @@ -1,55 +0,0 @@ -package impl - -import ( - "context" - - "github.com/containers/image/v5/internal/private" - "github.com/containers/image/v5/internal/signature" - "github.com/opencontainers/go-digest" -) - -// Compat implements the obsolete parts of types.ImageSource -// for implementations of private.ImageSource. -// See AddCompat below. -type Compat struct { - src private.ImageSourceInternalOnly -} - -// AddCompat initializes Compat to implement the obsolete parts of types.ImageSource -// for implementations of private.ImageSource. -// -// Use it like this: -// -// type yourSource struct { -// impl.Compat -// … -// } -// -// src := &yourSource{…} -// src.Compat = impl.AddCompat(src) -func AddCompat(src private.ImageSourceInternalOnly) Compat { - return Compat{src} -} - -// GetSignatures returns the image's signatures. It may use a remote (= slow) service. -// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for -// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list -// (e.g. if the source never returns manifest lists). -func (c *Compat) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { - // Silently ignore signatures with other formats; the caller can’t handle them. - // Admittedly callers that want to sync all of the image might want to fail instead; this - // way an upgrade of c/image neither breaks them nor adds new functionality. - // Alternatively, we could possibly define the old GetSignatures to use the multi-format - // signature.Blob representation now, in general, but that could silently break them as well. 
-	sigs, err := c.src.GetSignaturesWithFormat(ctx, instanceDigest)
-	if err != nil {
-		return nil, err
-	}
-	simpleSigs := [][]byte{}
-	for _, sig := range sigs {
-		if sig, ok := sig.(signature.SimpleSigning); ok {
-			simpleSigs = append(simpleSigs, sig.UntrustedSignature())
-		}
-	}
-	return simpleSigs, nil
-}
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/impl/layer_infos.go b/vendor/github.com/containers/image/v5/internal/imagesource/impl/layer_infos.go
deleted file mode 100644
index d5eae63519..0000000000
--- a/vendor/github.com/containers/image/v5/internal/imagesource/impl/layer_infos.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package impl
-
-import (
-	"context"
-
-	"github.com/containers/image/v5/types"
-	"github.com/opencontainers/go-digest"
-)
-
-// DoesNotAffectLayerInfosForCopy implements LayerInfosForCopy() that returns nothing.
-type DoesNotAffectLayerInfosForCopy struct{}
-
-// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
-// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
-// to read the image's layers.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
-// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
-// (e.g. if the source never returns manifest lists).
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (stub DoesNotAffectLayerInfosForCopy) LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]types.BlobInfo, error) {
-	return nil, nil
-}
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/impl/properties.go b/vendor/github.com/containers/image/v5/internal/imagesource/impl/properties.go
deleted file mode 100644
index 73e8c78e95..0000000000
--- a/vendor/github.com/containers/image/v5/internal/imagesource/impl/properties.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package impl
-
-// Properties collects properties of an ImageSource that are constant throughout its lifetime
-// (but might differ across instances).
-type Properties struct {
-	// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
-	HasThreadSafeGetBlob bool
-}
-
-// PropertyMethodsInitialize implements parts of private.ImageSource corresponding to Properties.
-type PropertyMethodsInitialize struct {
-	// We need two separate structs, PropertyMethodsInitialize and Properties, because Go prohibits fields and methods with the same name.
-
-	vals Properties
-}
-
-// PropertyMethods creates a PropertyMethodsInitialize for vals.
-func PropertyMethods(vals Properties) PropertyMethodsInitialize {
-	return PropertyMethodsInitialize{
-		vals: vals,
-	}
-}
-
-// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
-func (o PropertyMethodsInitialize) HasThreadSafeGetBlob() bool {
-	return o.vals.HasThreadSafeGetBlob
-}
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/impl/signatures.go b/vendor/github.com/containers/image/v5/internal/imagesource/impl/signatures.go
deleted file mode 100644
index b3a8c7e88d..0000000000
--- a/vendor/github.com/containers/image/v5/internal/imagesource/impl/signatures.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package impl
-
-import (
-	"context"
-
-	"github.com/containers/image/v5/internal/signature"
-	"github.com/opencontainers/go-digest"
-)
-
-// NoSignatures implements GetSignatures() that returns nothing.
-type NoSignatures struct{}
-
-// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
-// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
-// (e.g. if the source never returns manifest lists).
-func (stub NoSignatures) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
-	return nil, nil
-}
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/stubs/get_blob_at.go b/vendor/github.com/containers/image/v5/internal/imagesource/stubs/get_blob_at.go
deleted file mode 100644
index 286ae524b1..0000000000
--- a/vendor/github.com/containers/image/v5/internal/imagesource/stubs/get_blob_at.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package stubs
-
-import (
-	"context"
-	"fmt"
-	"io"
-
-	"github.com/containers/image/v5/internal/private"
-	"github.com/containers/image/v5/types"
-)
-
-// NoGetBlobAtInitialize implements parts of private.ImageSource
-// for transports that don’t support GetBlobAt().
-// See NoGetBlobAt() below.
-type NoGetBlobAtInitialize struct {
-	transportName string
-}
-
-// NoGetBlobAt creates a NoGetBlobAtInitialize for ref.
-func NoGetBlobAt(ref types.ImageReference) NoGetBlobAtInitialize {
-	return NoGetBlobAtRaw(ref.Transport().Name())
-}
-
-// NoGetBlobAtRaw is the same thing as NoGetBlobAt, but it can be used
-// in situations where no ImageReference is available.
-func NoGetBlobAtRaw(transportName string) NoGetBlobAtInitialize {
-	return NoGetBlobAtInitialize{
-		transportName: transportName,
-	}
-}
-
-// SupportsGetBlobAt returns true if GetBlobAt (BlobChunkAccessor) is supported.
-func (stub NoGetBlobAtInitialize) SupportsGetBlobAt() bool {
-	return false
-}
-
-// GetBlobAt returns a sequential channel of readers that contain data for the requested
-// blob chunks, and a channel that might get a single error value.
-// The specified chunks must not overlap and must be sorted by their offset.
-// The readers must be fully consumed, in the order they are returned, before blocking
-// to read the next chunk.
-// If the Length for the last chunk is set to math.MaxUint64, then it
-// fully fetches the remaining data from the offset to the end of the blob.
-func (stub NoGetBlobAtInitialize) GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []private.ImageSourceChunk) (chan io.ReadCloser, chan error, error) {
-	return nil, nil, fmt.Errorf("internal error: GetBlobAt is not supported by the %q transport", stub.transportName)
-}
-
-// ImplementsGetBlobAt implements SupportsGetBlobAt() that returns true.
-type ImplementsGetBlobAt struct{}
-
-// SupportsGetBlobAt returns true if GetBlobAt (BlobChunkAccessor) is supported.
-func (stub ImplementsGetBlobAt) SupportsGetBlobAt() bool {
-	return true
-}
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/stubs/stubs.go b/vendor/github.com/containers/image/v5/internal/imagesource/stubs/stubs.go
deleted file mode 100644
index cb345395e2..0000000000
--- a/vendor/github.com/containers/image/v5/internal/imagesource/stubs/stubs.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Package stubs contains trivial stubs for parts of private.ImageSource.
-// It can be used from internal/wrapper, so it should not drag in any extra dependencies.
-// Compare with imagesource/impl, which might require non-trivial implementation work.
-//
-// There are two kinds of stubs:
-//
-// First, there are pure stubs, like ImplementsGetBlobAt. Those can just be included in an ImageSource
-// implementation:
-//
-//	type yourSource struct {
-//		stubs.ImplementsGetBlobAt
-//		…
-//	}
-//
-// Second, there are stubs with a constructor, like NoGetBlobAtInitialize. The Initialize marker
-// means that a constructor must be called:
-//
-//	type yourSource struct {
-//		stubs.NoGetBlobAtInitialize
-//		…
-//	}
-//
-//	src := &yourSource{
-//		…
-//		NoGetBlobAtInitialize: stubs.NoGetBlobAt(ref),
-//	}
-package stubs
diff --git a/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go b/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go
deleted file mode 100644
index f0d1d042bf..0000000000
--- a/vendor/github.com/containers/image/v5/internal/imagesource/wrapper.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package imagesource
-
-import (
-	"context"
-
-	"github.com/containers/image/v5/internal/imagesource/stubs"
-	"github.com/containers/image/v5/internal/private"
-	"github.com/containers/image/v5/internal/signature"
-	"github.com/containers/image/v5/types"
-	"github.com/opencontainers/go-digest"
-)
-
-// wrapped provides the private.ImageSource operations
-// for a source that only implements types.ImageSource
-type wrapped struct {
-	stubs.NoGetBlobAtInitialize
-
-	types.ImageSource
-}
-
-// FromPublic(src) returns an object that provides the private.ImageSource API
-//
-// Eventually, we might want to expose this function, and methods of the returned object,
-// as a public API (or rather, a variant that does not include the already-superseded
-// methods of types.ImageSource, and has added more future-proofing), and more strongly
-// deprecate direct use of types.ImageSource.
-//
-// NOTE: The returned API MUST NOT be a public interface (it can be either just a struct
-// with public methods, or perhaps a private interface), so that we can add methods
-// without breaking any external implementers of a public interface.
-func FromPublic(src types.ImageSource) private.ImageSource {
-	if src2, ok := src.(private.ImageSource); ok {
-		return src2
-	}
-	return &wrapped{
-		NoGetBlobAtInitialize: stubs.NoGetBlobAt(src.Reference()),
-
-		ImageSource: src,
-	}
-}
-
-// GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service.
-// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for
-// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
-// (e.g. if the source never returns manifest lists).
-func (w *wrapped) GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) {
-	sigs, err := w.GetSignatures(ctx, instanceDigest)
-	if err != nil {
-		return nil, err
-	}
-	res := []signature.Signature{}
-	for _, sig := range sigs {
-		res = append(res, signature.SimpleSigningFromBlob(sig))
-	}
-	return res, nil
-}
diff --git a/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go b/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go
deleted file mode 100644
index f17d002469..0000000000
--- a/vendor/github.com/containers/image/v5/internal/iolimits/iolimits.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package iolimits
-
-import (
-	"fmt"
-	"io"
-)
-
-// All constants below are intended to be used as limits for `ReadAtMost`. The
-// immediate use-case for limiting the size of in-memory copied data is to
-// protect against OOM DoS attacks as described in CVE-2020-1702. Instead of
-// copying data until running out of memory, we error out after hitting the
-// specified limit.
-const (
-	// megaByte denotes one megabyte and is intended to be used as a limit in
-	// `ReadAtMost`.
-	megaByte = 1 << 20
-	// MaxManifestBodySize is the maximum allowed size of a manifest. The limit
-	// of 4 MB aligns with the one of a Docker registry:
-	// https://github.com/docker/distribution/blob/a8371794149d1d95f1e846744b05c87f2f825e5a/registry/handlers/manifests.go#L30
-	MaxManifestBodySize = 4 * megaByte
-	// MaxAuthTokenBodySize is the maximum allowed size of an auth token.
-	// The limit of 1 MB is considered to be greatly sufficient.
-	MaxAuthTokenBodySize = megaByte
-	// MaxSignatureListBodySize is the maximum allowed size of a signature list.
-	// The limit of 4 MB is considered to be greatly sufficient.
-	MaxSignatureListBodySize = 4 * megaByte
-	// MaxSignatureBodySize is the maximum allowed size of a signature.
-	// The limit of 4 MB is considered to be greatly sufficient.
-	MaxSignatureBodySize = 4 * megaByte
-	// MaxErrorBodySize is the maximum allowed size of an error-response body.
-	// The limit of 1 MB is considered to be greatly sufficient.
-	MaxErrorBodySize = megaByte
-	// MaxConfigBodySize is the maximum allowed size of a config blob.
-	// The limit of 4 MB is considered to be greatly sufficient.
-	MaxConfigBodySize = 4 * megaByte
-	// MaxOpenShiftStatusBody is the maximum allowed size of an OpenShift status body.
-	// The limit of 4 MB is considered to be greatly sufficient.
-	MaxOpenShiftStatusBody = 4 * megaByte
-	// MaxTarFileManifestSize is the maximum allowed size of a (docker save)-like manifest (which may contain multiple images).
-	// The limit of 1 MB is considered to be greatly sufficient.
-	MaxTarFileManifestSize = megaByte
-)
-
-// ReadAtMost reads from reader and errors out if the specified limit (in bytes) is exceeded.
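[review note, not part of the patch: these limits exist to cap in-memory reads, the CVE-2020-1702 hardening the comment above describes; the pattern they support is a bounded ReadAll. Sketch of the call site shape inside c/image, with resp being a hypothetical *http.Response:

	// body is read fully, but never beyond MaxManifestBodySize bytes
	// (ReadAtMost reads limit+1 to detect overflow and then errors out).
	body, err := iolimits.ReadAtMost(resp.Body, iolimits.MaxManifestBodySize)
	if err != nil {
		return err
	}

end of note]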
-func ReadAtMost(reader io.Reader, limit int) ([]byte, error) { - limitedReader := io.LimitReader(reader, int64(limit+1)) - - res, err := io.ReadAll(limitedReader) - if err != nil { - return nil, err - } - - if len(res) > limit { - return nil, fmt.Errorf("exceeded maximum allowed size of %d bytes", limit) - } - - return res, nil -} diff --git a/vendor/github.com/containers/image/v5/internal/manifest/common.go b/vendor/github.com/containers/image/v5/internal/manifest/common.go deleted file mode 100644 index 1f2ccb5286..0000000000 --- a/vendor/github.com/containers/image/v5/internal/manifest/common.go +++ /dev/null @@ -1,72 +0,0 @@ -package manifest - -import ( - "encoding/json" - "fmt" -) - -// AllowedManifestFields is a bit mask of “essential” manifest fields that ValidateUnambiguousManifestFormat -// can expect to be present. -type AllowedManifestFields int - -const ( - AllowedFieldConfig AllowedManifestFields = 1 << iota - AllowedFieldFSLayers - AllowedFieldHistory - AllowedFieldLayers - AllowedFieldManifests - AllowedFieldFirstUnusedBit // Keep this at the end! -) - -// ValidateUnambiguousManifestFormat rejects manifests (incl. multi-arch) that look like more than -// one kind we currently recognize, i.e. if they contain any of the known “essential” format fields -// other than the ones the caller specifically allows. -// expectedMIMEType is used only for diagnostics. -// NOTE: The caller should do the non-heuristic validations (e.g. check for any specified format -// identification/version, or other “magic numbers”) before calling this, to cleanly reject unambiguous -// data that just isn’t what was expected, as opposed to actually ambiguous data. -func ValidateUnambiguousManifestFormat(manifest []byte, expectedMIMEType string, - allowed AllowedManifestFields) error { - if allowed >= AllowedFieldFirstUnusedBit { - return fmt.Errorf("internal error: invalid allowedManifestFields value %#v", allowed) - } - // Use a private type to decode, not just a map[string]any, because we want - // to also reject case-insensitive matches (which would be used by Go when really decoding - // the manifest). - // (It is expected that as manifest formats are added or extended over time, more fields will be added - // here.) - detectedFields := struct { - Config any `json:"config"` - FSLayers any `json:"fsLayers"` - History any `json:"history"` - Layers any `json:"layers"` - Manifests any `json:"manifests"` - }{} - if err := json.Unmarshal(manifest, &detectedFields); err != nil { - // The caller was supposed to already validate version numbers, so this should not happen; - // let’s not bother with making this error “nice”. - return err - } - unexpected := []string{} - // Sadly this isn’t easy to automate in Go, without reflection. So, copy&paste. 
- if detectedFields.Config != nil && (allowed&AllowedFieldConfig) == 0 { - unexpected = append(unexpected, "config") - } - if detectedFields.FSLayers != nil && (allowed&AllowedFieldFSLayers) == 0 { - unexpected = append(unexpected, "fsLayers") - } - if detectedFields.History != nil && (allowed&AllowedFieldHistory) == 0 { - unexpected = append(unexpected, "history") - } - if detectedFields.Layers != nil && (allowed&AllowedFieldLayers) == 0 { - unexpected = append(unexpected, "layers") - } - if detectedFields.Manifests != nil && (allowed&AllowedFieldManifests) == 0 { - unexpected = append(unexpected, "manifests") - } - if len(unexpected) != 0 { - return fmt.Errorf(`rejecting ambiguous manifest, unexpected fields %#v in supposedly %s`, - unexpected, expectedMIMEType) - } - return nil -} diff --git a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2.go b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2.go deleted file mode 100644 index 68d0796978..0000000000 --- a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2.go +++ /dev/null @@ -1,15 +0,0 @@ -package manifest - -import ( - "github.com/opencontainers/go-digest" -) - -// Schema2Descriptor is a “descriptor” in docker/distribution schema 2. -// -// This is publicly visible as c/image/manifest.Schema2Descriptor. -type Schema2Descriptor struct { - MediaType string `json:"mediaType"` - Size int64 `json:"size"` - Digest digest.Digest `json:"digest"` - URLs []string `json:"urls,omitempty"` -} diff --git a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go deleted file mode 100644 index 4c1589ef02..0000000000 --- a/vendor/github.com/containers/image/v5/internal/manifest/docker_schema2_list.go +++ /dev/null @@ -1,311 +0,0 @@ -package manifest - -import ( - "encoding/json" - "fmt" - "slices" - - platform "github.com/containers/image/v5/internal/pkg/platform" - compression "github.com/containers/image/v5/pkg/compression/types" - "github.com/containers/image/v5/types" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" -) - -// Schema2PlatformSpec describes the platform which a particular manifest is -// specialized for. -// This is publicly visible as c/image/manifest.Schema2PlatformSpec. -type Schema2PlatformSpec struct { - Architecture string `json:"architecture"` - OS string `json:"os"` - OSVersion string `json:"os.version,omitempty"` - OSFeatures []string `json:"os.features,omitempty"` - Variant string `json:"variant,omitempty"` - Features []string `json:"features,omitempty"` // removed in OCI -} - -// Schema2ManifestDescriptor references a platform-specific manifest. -// This is publicly visible as c/image/manifest.Schema2ManifestDescriptor. -type Schema2ManifestDescriptor struct { - Schema2Descriptor - Platform Schema2PlatformSpec `json:"platform"` -} - -// Schema2ListPublic is a list of platform-specific manifests. -// This is publicly visible as c/image/manifest.Schema2List. -// Internal users should usually use Schema2List instead. -type Schema2ListPublic struct { - SchemaVersion int `json:"schemaVersion"` - MediaType string `json:"mediaType"` - Manifests []Schema2ManifestDescriptor `json:"manifests"` -} - -// MIMEType returns the MIME type of this particular manifest list. 
-func (list *Schema2ListPublic) MIMEType() string { - return list.MediaType -} - -// Instances returns a slice of digests of the manifests that this list knows of. -func (list *Schema2ListPublic) Instances() []digest.Digest { - results := make([]digest.Digest, len(list.Manifests)) - for i, m := range list.Manifests { - results[i] = m.Digest - } - return results -} - -// Instance returns the ListUpdate of a particular instance in the list. -func (list *Schema2ListPublic) Instance(instanceDigest digest.Digest) (ListUpdate, error) { - for _, manifest := range list.Manifests { - if manifest.Digest == instanceDigest { - ret := ListUpdate{ - Digest: manifest.Digest, - Size: manifest.Size, - MediaType: manifest.MediaType, - } - ret.ReadOnly.CompressionAlgorithmNames = []string{compression.GzipAlgorithmName} - platform := ociPlatformFromSchema2PlatformSpec(manifest.Platform) - ret.ReadOnly.Platform = &platform - return ret, nil - } - } - return ListUpdate{}, fmt.Errorf("unable to find instance %s passed to Schema2List.Instances", instanceDigest) -} - -// UpdateInstances updates the sizes, digests, and media types of the manifests -// which the list catalogs. -func (list *Schema2ListPublic) UpdateInstances(updates []ListUpdate) error { - editInstances := []ListEdit{} - for i, instance := range updates { - editInstances = append(editInstances, ListEdit{ - UpdateOldDigest: list.Manifests[i].Digest, - UpdateDigest: instance.Digest, - UpdateSize: instance.Size, - UpdateMediaType: instance.MediaType, - ListOperation: ListOpUpdate}) - } - return list.editInstances(editInstances) -} - -func (list *Schema2ListPublic) editInstances(editInstances []ListEdit) error { - addedEntries := []Schema2ManifestDescriptor{} - for i, editInstance := range editInstances { - switch editInstance.ListOperation { - case ListOpUpdate: - if err := editInstance.UpdateOldDigest.Validate(); err != nil { - return fmt.Errorf("Schema2List.EditInstances: Attempting to update %s which is an invalid digest: %w", editInstance.UpdateOldDigest, err) - } - if err := editInstance.UpdateDigest.Validate(); err != nil { - return fmt.Errorf("Schema2List.EditInstances: Modified digest %s is an invalid digest: %w", editInstance.UpdateDigest, err) - } - targetIndex := slices.IndexFunc(list.Manifests, func(m Schema2ManifestDescriptor) bool { - return m.Digest == editInstance.UpdateOldDigest - }) - if targetIndex == -1 { - return fmt.Errorf("Schema2List.EditInstances: digest %s not found", editInstance.UpdateOldDigest) - } - list.Manifests[targetIndex].Digest = editInstance.UpdateDigest - if editInstance.UpdateSize < 0 { - return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had an invalid size (%d)", i+1, len(editInstances), editInstance.UpdateSize) - } - list.Manifests[targetIndex].Size = editInstance.UpdateSize - if editInstance.UpdateMediaType == "" { - return fmt.Errorf("update %d of %d passed to Schema2List.UpdateInstances had no media type (was %q)", i+1, len(editInstances), list.Manifests[i].MediaType) - } - list.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType - case ListOpAdd: - if editInstance.AddPlatform == nil { - // Should we create a struct with empty fields instead? - // Right now ListOpAdd is only called when an instance with the same platform value - // already exists in the manifest, so this should not be reached in practice. 
- return fmt.Errorf("adding a schema2 list instance with no platform specified is not supported") - } - addedEntries = append(addedEntries, Schema2ManifestDescriptor{ - Schema2Descriptor{ - Digest: editInstance.AddDigest, - Size: editInstance.AddSize, - MediaType: editInstance.AddMediaType, - }, - schema2PlatformSpecFromOCIPlatform(*editInstance.AddPlatform), - }) - default: - return fmt.Errorf("internal error: invalid operation: %d", editInstance.ListOperation) - } - } - if len(addedEntries) != 0 { - // slices.Clone() here to ensure a private backing array; - // an external caller could have manually created Schema2ListPublic with a slice with extra capacity. - list.Manifests = append(slices.Clone(list.Manifests), addedEntries...) - } - return nil -} - -func (list *Schema2List) EditInstances(editInstances []ListEdit) error { - return list.editInstances(editInstances) -} - -func (list *Schema2ListPublic) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) { - // ChooseInstanceByCompression is same as ChooseInstance for schema2 manifest list. - return list.ChooseInstance(ctx) -} - -// ChooseInstance parses blob as a schema2 manifest list, and returns the digest -// of the image which is appropriate for the current environment. -func (list *Schema2ListPublic) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) { - wantedPlatforms := platform.WantedPlatforms(ctx) - for _, wantedPlatform := range wantedPlatforms { - for _, d := range list.Manifests { - imagePlatform := ociPlatformFromSchema2PlatformSpec(d.Platform) - if platform.MatchesPlatform(imagePlatform, wantedPlatform) { - return d.Digest, nil - } - } - } - return "", fmt.Errorf("no image found in manifest list for architecture %q, variant %q, OS %q", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS) -} - -// Serialize returns the list in a blob format. -// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! -func (list *Schema2ListPublic) Serialize() ([]byte, error) { - buf, err := json.Marshal(list) - if err != nil { - return nil, fmt.Errorf("marshaling Schema2List %#v: %w", list, err) - } - return buf, nil -} - -// Schema2ListPublicFromComponents creates a Schema2 manifest list instance from the -// supplied data. -// This is publicly visible as c/image/manifest.Schema2ListFromComponents. -func Schema2ListPublicFromComponents(components []Schema2ManifestDescriptor) *Schema2ListPublic { - list := Schema2ListPublic{ - SchemaVersion: 2, - MediaType: DockerV2ListMediaType, - Manifests: make([]Schema2ManifestDescriptor, len(components)), - } - for i, component := range components { - m := Schema2ManifestDescriptor{ - Schema2Descriptor{ - MediaType: component.MediaType, - Size: component.Size, - Digest: component.Digest, - URLs: slices.Clone(component.URLs), - }, - Schema2PlatformSpec{ - Architecture: component.Platform.Architecture, - OS: component.Platform.OS, - OSVersion: component.Platform.OSVersion, - OSFeatures: slices.Clone(component.Platform.OSFeatures), - Variant: component.Platform.Variant, - Features: slices.Clone(component.Platform.Features), - }, - } - list.Manifests[i] = m - } - return &list -} - -// Schema2ListPublicClone creates a deep copy of the passed-in list. -// This is publicly visible as c/image/manifest.Schema2ListClone. 
-func Schema2ListPublicClone(list *Schema2ListPublic) *Schema2ListPublic {
-	return Schema2ListPublicFromComponents(list.Manifests)
-}
-
-// ToOCI1Index returns the list encoded as an OCI1 index.
-func (list *Schema2ListPublic) ToOCI1Index() (*OCI1IndexPublic, error) {
-	components := make([]imgspecv1.Descriptor, 0, len(list.Manifests))
-	for _, manifest := range list.Manifests {
-		platform := ociPlatformFromSchema2PlatformSpec(manifest.Platform)
-		components = append(components, imgspecv1.Descriptor{
-			MediaType: manifest.MediaType,
-			Size:      manifest.Size,
-			Digest:    manifest.Digest,
-			URLs:      slices.Clone(manifest.URLs),
-			Platform:  &platform,
-		})
-	}
-	oci := OCI1IndexPublicFromComponents(components, nil)
-	return oci, nil
-}
-
-// ToSchema2List returns the list encoded as a Schema2 list.
-func (list *Schema2ListPublic) ToSchema2List() (*Schema2ListPublic, error) {
-	return Schema2ListPublicClone(list), nil
-}
-
-// Schema2ListPublicFromManifest creates a Schema2 manifest list instance from marshalled
-// JSON, presumably generated by encoding a Schema2 manifest list.
-// This is publicly visible as c/image/manifest.Schema2ListFromManifest.
-func Schema2ListPublicFromManifest(manifest []byte) (*Schema2ListPublic, error) {
-	list := Schema2ListPublic{
-		Manifests: []Schema2ManifestDescriptor{},
-	}
-	if err := json.Unmarshal(manifest, &list); err != nil {
-		return nil, fmt.Errorf("unmarshaling Schema2List %q: %w", string(manifest), err)
-	}
-	if err := ValidateUnambiguousManifestFormat(manifest, DockerV2ListMediaType,
-		AllowedFieldManifests); err != nil {
-		return nil, err
-	}
-	return &list, nil
-}
-
-// Clone returns a deep copy of this list and its contents.
-func (list *Schema2ListPublic) Clone() ListPublic {
-	return Schema2ListPublicClone(list)
-}
-
-// ConvertToMIMEType converts the passed-in manifest list to a manifest
-// list of the specified type.
-func (list *Schema2ListPublic) ConvertToMIMEType(manifestMIMEType string) (ListPublic, error) {
-	switch normalized := NormalizedMIMEType(manifestMIMEType); normalized {
-	case DockerV2ListMediaType:
-		return list.Clone(), nil
-	case imgspecv1.MediaTypeImageIndex:
-		return list.ToOCI1Index()
-	case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
-		return nil, fmt.Errorf("Can not convert manifest list to MIME type %q, which is not a list type", manifestMIMEType)
-	default:
-		// Note that this may not be reachable, NormalizedMIMEType has a default for unknown values.
-		return nil, fmt.Errorf("Unimplemented manifest list MIME type %s", manifestMIMEType)
-	}
-}
-
-// Schema2List is a list of platform-specific manifests.
-type Schema2List struct {
-	Schema2ListPublic
-}
-
-func schema2ListFromPublic(public *Schema2ListPublic) *Schema2List {
-	return &Schema2List{*public}
-}
-
-func (list *Schema2List) CloneInternal() List {
-	return schema2ListFromPublic(Schema2ListPublicClone(&list.Schema2ListPublic))
-}
-
-func (list *Schema2List) Clone() ListPublic {
-	return list.CloneInternal()
-}
-
-// Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled
-// JSON, presumably generated by encoding a Schema2 manifest list.
-func Schema2ListFromManifest(manifest []byte) (*Schema2List, error) {
-	public, err := Schema2ListPublicFromManifest(manifest)
-	if err != nil {
-		return nil, err
-	}
-	return schema2ListFromPublic(public), nil
-}
-
-// ociPlatformFromSchema2PlatformSpec converts a schema2 platform p to the OCI structure.
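[review note, not part of the patch: ToOCI1Index/ConvertToMIMEType above are the list-conversion entry points. Through the public manifest package (the "publicly visible as …" names in the comments), the same conversion looks roughly like this sketch under the new module path:

	package example

	import (
		imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"

		"go.podman.io/image/v5/manifest"
	)

	// toOCIIndex reparses a Docker schema2 manifest list and re-serializes it
	// as an OCI image index.
	func toOCIIndex(blob []byte) ([]byte, error) {
		list, err := manifest.Schema2ListFromManifest(blob)
		if err != nil {
			return nil, err
		}
		converted, err := list.ConvertToMIMEType(imgspecv1.MediaTypeImageIndex)
		if err != nil {
			return nil, err
		}
		return converted.Serialize()
	}

end of note]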
-func ociPlatformFromSchema2PlatformSpec(p Schema2PlatformSpec) imgspecv1.Platform {
-	return imgspecv1.Platform{
-		Architecture: p.Architecture,
-		OS:           p.OS,
-		OSVersion:    p.OSVersion,
-		OSFeatures:   slices.Clone(p.OSFeatures),
-		Variant:      p.Variant,
-		// Features is not supported in OCI, and discarded.
-	}
-}
diff --git a/vendor/github.com/containers/image/v5/internal/manifest/errors.go b/vendor/github.com/containers/image/v5/internal/manifest/errors.go
deleted file mode 100644
index 6c8e233d97..0000000000
--- a/vendor/github.com/containers/image/v5/internal/manifest/errors.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package manifest
-
-import (
-	"fmt"
-
-	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
-)
-
-// FIXME: This is a duplicate of c/image/manifest.DockerV2Schema2ConfigMediaType.
-// Deduplicate that, depending on outcome of https://github.com/containers/image/pull/1791 .
-const dockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json"
-
-// NonImageArtifactError (detected via errors.As) is used when asking for an image-specific operation
-// on an object which is not a “container image” in the standard sense (e.g. an OCI artifact)
-//
-// This is publicly visible as c/image/manifest.NonImageArtifactError (but we don’t provide a public constructor)
-type NonImageArtifactError struct {
-	// Callers should not be blindly calling image-specific operations and only checking MIME types
-	// on failure; if they care about the artifact type, they should check before using it.
-	// If they blindly assume an image, they don’t really need this value; just a type check
-	// is sufficient for basic "we can only pull images" UI.
-	//
-	// Also, there are fairly widespread “artifacts” which nevertheless use imgspecv1.MediaTypeImageConfig,
-	// e.g. https://github.com/sigstore/cosign/blob/main/specs/SIGNATURE_SPEC.md , which could cause the callers
-	// to complain about a non-image artifact with the correct MIME type; we should probably add some other kind of
-	// type discrimination, _and_ somehow make it available in the API, if we expect API callers to make decisions
-	// based on that kind of data.
-	//
-	// So, let’s not expose this until a specific need is identified.
-	mimeType string
-}
-
-// NewNonImageArtifactError returns a NonImageArtifactError about an artifact manifest.
-//
-// This is typically called if manifest.Config.MediaType != imgspecv1.MediaTypeImageConfig .
-func NewNonImageArtifactError(manifest *imgspecv1.Manifest) error {
-	// Callers decide based on manifest.Config.MediaType that this is not an image;
-	// in that case manifest.ArtifactType can be optionally defined, and if it is, it is typically
-	// more relevant because config may be ~absent with imgspecv1.MediaTypeEmptyJSON.
-	//
-	// If ArtifactType and Config.MediaType are both defined and non-trivial, presumably
-	// ArtifactType is the “top-level” one, although that’s not defined by the spec.
-	mimeType := manifest.ArtifactType
-	if mimeType == "" {
-		mimeType = manifest.Config.MediaType
-	}
-	return NonImageArtifactError{mimeType: mimeType}
-}
-
-func (e NonImageArtifactError) Error() string {
-	// Special-case these invalid mixed images, which show up from time to time:
-	if e.mimeType == dockerV2Schema2ConfigMediaType {
-		return fmt.Sprintf("invalid mixed OCI image with Docker v2s2 config (%q)", e.mimeType)
-	}
-	return fmt.Sprintf("unsupported image-specific operation on artifact with type %q", e.mimeType)
-}
diff --git a/vendor/github.com/containers/image/v5/internal/manifest/list.go b/vendor/github.com/containers/image/v5/internal/manifest/list.go
deleted file mode 100644
index 1c614d1246..0000000000
--- a/vendor/github.com/containers/image/v5/internal/manifest/list.go
+++ /dev/null
@@ -1,133 +0,0 @@
-package manifest
-
-import (
-	"fmt"
-
-	compression "github.com/containers/image/v5/pkg/compression/types"
-	"github.com/containers/image/v5/types"
-	digest "github.com/opencontainers/go-digest"
-	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
-)
-
-// ListPublic is a subset of List which is a part of the public API;
-// so no methods can be added, removed or changed.
-//
-// Internal users should usually use List instead.
-type ListPublic interface {
-	// MIMEType returns the MIME type of this particular manifest list.
-	MIMEType() string
-
-	// Instances returns a list of the manifests that this list knows of, other than its own.
-	Instances() []digest.Digest
-
-	// Update information about the list's instances. The length of the passed-in slice must
-	// match the length of the list of instances which the list already contains, and every field
-	// must be specified.
-	UpdateInstances([]ListUpdate) error
-
-	// Instance returns the size and MIME type of a particular instance in the list.
-	Instance(digest.Digest) (ListUpdate, error)
-
-	// ChooseInstance selects which manifest is most appropriate for the platform described by the
-	// SystemContext, or for the current platform if the SystemContext doesn't specify any details.
-	ChooseInstance(ctx *types.SystemContext) (digest.Digest, error)
-
-	// Serialize returns the list in a blob format.
-	// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded
-	// from one, even if no modifications were made!
-	Serialize() ([]byte, error)
-
-	// ConvertToMIMEType returns the list rebuilt to the specified MIME type, or an error.
-	ConvertToMIMEType(mimeType string) (ListPublic, error)
-
-	// Clone returns a deep copy of this list and its contents.
-	Clone() ListPublic
-}
-
-// List is an interface for parsing and modifying lists of image manifests.
-// Callers can either use this abstract interface without understanding the details of the formats,
-// or instantiate a specific implementation (e.g. manifest.OCI1Index) and access the public members
-// directly.
-type List interface {
-	ListPublic
-	// CloneInternal returns a deep copy of this list and its contents.
-	CloneInternal() List
-	// ChooseInstanceByCompression selects which manifest is most appropriate for the platform and compression described by the
-	// SystemContext (or for the current platform if the SystemContext doesn't specify any detail); preferGzip, when set to
-	// OptionalBoolTrue, prefers gzip-compressed instances, while OptionalBoolFalse or OptionalBoolUndefined choose the best available compression.
-	ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error)
-	// Edit information about the list's instances. Contains a slice of ListEdit where each element
-	// is responsible for either modifying or adding a new instance to the manifest. The operation is
-	// selected on the basis of the configured ListOperation field.
-	EditInstances([]ListEdit) error
-}
-
-// ListUpdate includes the fields which a List's UpdateInstances() method will modify.
-// This is publicly visible as c/image/manifest.ListUpdate.
-type ListUpdate struct {
-	Digest    digest.Digest
-	Size      int64
-	MediaType string
-	// ReadOnly fields: may be set by Instance(), ignored by UpdateInstance()
-	ReadOnly struct {
-		Platform                  *imgspecv1.Platform
-		Annotations               map[string]string
-		CompressionAlgorithmNames []string
-		ArtifactType              string
-	}
-}
-
-type ListOp int
-
-const (
-	listOpInvalid ListOp = iota
-	ListOpAdd
-	ListOpUpdate
-)
-
-// ListEdit includes the fields which a List's EditInstances() method will modify.
-type ListEdit struct {
-	ListOperation ListOp
-
-	// If Op == ListOpUpdate (basically the previous UpdateInstances). All fields must be set.
-	UpdateOldDigest             digest.Digest
-	UpdateDigest                digest.Digest
-	UpdateSize                  int64
-	UpdateMediaType             string
-	UpdateAffectAnnotations     bool
-	UpdateAnnotations           map[string]string
-	UpdateCompressionAlgorithms []compression.Algorithm
-
-	// If Op == ListOpAdd. All fields must be set.
-	AddDigest                digest.Digest
-	AddSize                  int64
-	AddMediaType             string
-	AddArtifactType          string
-	AddPlatform              *imgspecv1.Platform
-	AddAnnotations           map[string]string
-	AddCompressionAlgorithms []compression.Algorithm
-}
-
-// ListPublicFromBlob parses a list of manifests.
-// This is publicly visible as c/image/manifest.ListFromBlob.
-func ListPublicFromBlob(manifest []byte, manifestMIMEType string) (ListPublic, error) {
-	list, err := ListFromBlob(manifest, manifestMIMEType)
-	if err != nil {
-		return nil, err
-	}
-	return list, nil
-}
-
-// ListFromBlob parses a list of manifests.
-func ListFromBlob(manifest []byte, manifestMIMEType string) (List, error) {
-	normalized := NormalizedMIMEType(manifestMIMEType)
-	switch normalized {
-	case DockerV2ListMediaType:
-		return Schema2ListFromManifest(manifest)
-	case imgspecv1.MediaTypeImageIndex:
-		return OCI1IndexFromManifest(manifest)
-	case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType:
-		return nil, fmt.Errorf("Treating single images as manifest lists is not implemented")
-	}
-	return nil, fmt.Errorf("Unimplemented manifest list MIME type %q (normalized as %q)", manifestMIMEType, normalized)
-}
diff --git a/vendor/github.com/containers/image/v5/internal/manifest/manifest.go b/vendor/github.com/containers/image/v5/internal/manifest/manifest.go
deleted file mode 100644
index 3fb52104a6..0000000000
--- a/vendor/github.com/containers/image/v5/internal/manifest/manifest.go
+++ /dev/null
@@ -1,226 +0,0 @@
-package manifest
-
-import (
-	"encoding/json"
-	"slices"
-
-	compressiontypes "github.com/containers/image/v5/pkg/compression/types"
-	"github.com/containers/libtrust"
-	digest "github.com/opencontainers/go-digest"
-	imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
-)
-
-// FIXME: Should we just use docker/distribution and docker/docker implementations directly?
-
-// FIXME(runcom, mitr): should we have a mediatype pkg??
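[review note, not part of the patch: ListEdit quoted above is a tagged union keyed by ListOperation. Inside c/image (this is internal/manifest API, sketch only; oldDigest/newDigest/newSize are hypothetical), an in-place digest update is expressed as:

	err := list.EditInstances([]manifest.ListEdit{{
		ListOperation:   manifest.ListOpUpdate,
		UpdateOldDigest: oldDigest, // instance currently in the list
		UpdateDigest:    newDigest,
		UpdateSize:      newSize,
		UpdateMediaType: imgspecv1.MediaTypeImageManifest,
	}})

end of note]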
-const (
-	// DockerV2Schema1MediaType MIME type represents Docker manifest schema 1
-	DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json"
-	// DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature
-	DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws"
-	// DockerV2Schema2MediaType MIME type represents Docker manifest schema 2
-	DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json"
-	// DockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs.
-	DockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json"
-	// DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers.
-	DockerV2Schema2LayerMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip"
-	// DockerV2SchemaLayerMediaTypeUncompressed is the mediaType used for uncompressed layers.
-	DockerV2SchemaLayerMediaTypeUncompressed = "application/vnd.docker.image.rootfs.diff.tar"
-	// DockerV2ListMediaType MIME type represents Docker manifest schema 2 list
-	DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json"
-	// DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers.
-	DockerV2Schema2ForeignLayerMediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar"
-	// DockerV2Schema2ForeignLayerMediaTypeGzip is the MIME type used for gzipped schema 2 foreign layers.
-	DockerV2Schema2ForeignLayerMediaTypeGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
-)
-
-// GuessMIMEType guesses the MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized.
-// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest,
-// but we may not have such metadata available (e.g. when the manifest is a local file).
-// This is publicly visible as c/image/manifest.GuessMIMEType.
-func GuessMIMEType(manifest []byte) string {
-	// A subset of manifest fields; the rest is silently ignored by json.Unmarshal.
-	// Also docker/distribution/manifest.Versioned.
-	meta := struct {
-		MediaType     string `json:"mediaType"`
-		SchemaVersion int    `json:"schemaVersion"`
-		Signatures    any    `json:"signatures"`
-	}{}
-	if err := json.Unmarshal(manifest, &meta); err != nil {
-		return ""
-	}
-
-	switch meta.MediaType {
-	case DockerV2Schema2MediaType, DockerV2ListMediaType,
-		imgspecv1.MediaTypeImageManifest, imgspecv1.MediaTypeImageIndex: // A recognized type.
-		return meta.MediaType
-	}
-	// This is the only way the function can return DockerV2Schema1MediaType, and recognizing that is essential for stripping the JWS signatures, i.e. computing the correct manifest digest.
-	switch meta.SchemaVersion {
-	case 1:
-		if meta.Signatures != nil {
-			return DockerV2Schema1SignedMediaType
-		}
-		return DockerV2Schema1MediaType
-	case 2:
-		// Best effort to understand if this is an OCI image since mediaType
-		// wasn't in the manifest for OCI image-spec < 1.0.2.
-		// For docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess.
- ociMan := struct { - Config struct { - MediaType string `json:"mediaType"` - } `json:"config"` - }{} - if err := json.Unmarshal(manifest, &ociMan); err != nil { - return "" - } - switch ociMan.Config.MediaType { - case imgspecv1.MediaTypeImageConfig: - return imgspecv1.MediaTypeImageManifest - case DockerV2Schema2ConfigMediaType: - // This case should not happen since a Docker image - // must declare a top-level media type and - // `meta.MediaType` has already been checked. - return DockerV2Schema2MediaType - } - // Maybe an image index or an OCI artifact. - ociIndex := struct { - Manifests []imgspecv1.Descriptor `json:"manifests"` - }{} - if err := json.Unmarshal(manifest, &ociIndex); err != nil { - return "" - } - if len(ociIndex.Manifests) != 0 { - if ociMan.Config.MediaType == "" { - return imgspecv1.MediaTypeImageIndex - } - // FIXME: this is mixing media types of manifests and configs. - return ociMan.Config.MediaType - } - // It's most likely an OCI artifact with a custom config media - // type which is not (and cannot) be covered by the media-type - // checks above. - return imgspecv1.MediaTypeImageManifest - } - return "" -} - -// Digest returns the digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures. - // This is publicly visible as c/image/manifest.Digest. -func Digest(manifest []byte) (digest.Digest, error) { - if GuessMIMEType(manifest) == DockerV2Schema1SignedMediaType { - sig, err := libtrust.ParsePrettySignature(manifest, "signatures") - if err != nil { - return "", err - } - manifest, err = sig.Payload() - if err != nil { - // Coverage: This should never happen, libtrust's Payload() can fail only if joseBase64UrlDecode() fails, on a string - // that libtrust itself has josebase64UrlEncode()d - return "", err - } - } - - return digest.FromBytes(manifest), nil -} - -// MatchesDigest returns true iff the manifest matches expectedDigest. - // Error may be set if this returns false. - // Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified, - // or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob. - // This is publicly visible as c/image/manifest.MatchesDigest. -func MatchesDigest(manifest []byte, expectedDigest digest.Digest) (bool, error) { - // This should eventually support various digest types. - actualDigest, err := Digest(manifest) - if err != nil { - return false, err - } - return expectedDigest == actualDigest, nil -} - -// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server, - // centralizing various workarounds. - // This is publicly visible as c/image/manifest.NormalizedMIMEType. -func NormalizedMIMEType(input string) string { - switch input { - // "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md . - // This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might - // need to happen within the ImageSource.
- case "application/json": - return DockerV2Schema1SignedMediaType - case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, - imgspecv1.MediaTypeImageManifest, - imgspecv1.MediaTypeImageIndex, - DockerV2Schema2MediaType, - DockerV2ListMediaType: - return input - default: - // If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time - // to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108 - // and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50 - // - // Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag. - // This makes no real sense, but it happens - // because requests for manifests are - // redirected to a content distribution - // network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442 - return DockerV2Schema1SignedMediaType - } -} - -// CompressionAlgorithmIsUniversallySupported returns true if MIMETypeSupportsCompressionAlgorithm(mimeType, algo) returns true for all mimeType values. -func CompressionAlgorithmIsUniversallySupported(algo compressiontypes.Algorithm) bool { - // Compare the discussion about BaseVariantName in MIMETypeSupportsCompressionAlgorithm(). - switch algo.Name() { - case compressiontypes.GzipAlgorithmName: - return true - default: - return false - } -} - -// MIMETypeSupportsCompressionAlgorithm returns true if mimeType can represent algo. -func MIMETypeSupportsCompressionAlgorithm(mimeType string, algo compressiontypes.Algorithm) bool { - if CompressionAlgorithmIsUniversallySupported(algo) { - return true - } - // This does not use BaseVariantName: Plausibly a manifest format might support zstd but not have annotation fields. - // The logic might have to be more complex (and more ad-hoc) if more manifest formats, with more capabilities, emerge. - switch algo.Name() { - case compressiontypes.ZstdAlgorithmName, compressiontypes.ZstdChunkedAlgorithmName: - return mimeType == imgspecv1.MediaTypeImageManifest - default: // Includes Bzip2AlgorithmName and XzAlgorithmName, which are defined names but are not supported anywhere - return false - } -} - -// ReuseConditions are an input to CandidateCompressionMatchesReuseConditions; -// it is a struct to allow longer and better-documented field names. -type ReuseConditions struct { - PossibleManifestFormats []string // If set, a set of possible manifest formats; at least one should support the reused layer - RequiredCompression *compressiontypes.Algorithm // If set, only reuse layers with a matching algorithm -} - -// CandidateCompressionMatchesReuseConditions returns true if a layer with candidateCompression -// (which can be nil to represent uncompressed or unknown) matches reuseConditions. 
-func CandidateCompressionMatchesReuseConditions(c ReuseConditions, candidateCompression *compressiontypes.Algorithm) bool { - if c.RequiredCompression != nil { - if candidateCompression == nil || - (c.RequiredCompression.Name() != candidateCompression.Name() && c.RequiredCompression.Name() != candidateCompression.BaseVariantName()) { - return false - } - } - - // For candidateCompression == nil, we can’t tell the difference between “uncompressed” and “unknown”; - // and “uncompressed” is acceptable in all known formats (well, it seems to work in practice for schema1), - // so don’t impose any restrictions if candidateCompression == nil - if c.PossibleManifestFormats != nil && candidateCompression != nil { - if !slices.ContainsFunc(c.PossibleManifestFormats, func(mt string) bool { - return MIMETypeSupportsCompressionAlgorithm(mt, *candidateCompression) - }) { - return false - } - } - - return true -} diff --git a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go b/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go deleted file mode 100644 index 719deccbb2..0000000000 --- a/vendor/github.com/containers/image/v5/internal/manifest/oci_index.go +++ /dev/null @@ -1,466 +0,0 @@ -package manifest - -import ( - "bytes" - "encoding/json" - "fmt" - "maps" - "math" - "runtime" - "slices" - - platform "github.com/containers/image/v5/internal/pkg/platform" - compression "github.com/containers/image/v5/pkg/compression/types" - "github.com/containers/image/v5/types" - "github.com/opencontainers/go-digest" - imgspec "github.com/opencontainers/image-spec/specs-go" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" -) - -const ( - // OCI1InstanceAnnotationCompressionZSTD is an annotation name that can be placed on a manifest descriptor in an OCI index. - // The value of the annotation must be the string "true". - // If this annotation is present on a manifest, consuming that image instance requires support for Zstd compression. - // That also suggests that this instance benefits from - // Zstd compression, so it can be preferred by compatible consumers over instances that - // use gzip, depending on their local policy. - OCI1InstanceAnnotationCompressionZSTD = "io.github.containers.compression.zstd" - OCI1InstanceAnnotationCompressionZSTDValue = "true" -) - -// OCI1IndexPublic is just an alias for the OCI index type, but one which we can -// provide methods for. -// This is publicly visible as c/image/manifest.OCI1Index -// Internal users should usually use OCI1Index instead. -type OCI1IndexPublic struct { - imgspecv1.Index -} - -// MIMEType returns the MIME type of this particular manifest index. -func (index *OCI1IndexPublic) MIMEType() string { - return imgspecv1.MediaTypeImageIndex -} - -// Instances returns a slice of digests of the manifests that this index knows of. -func (index *OCI1IndexPublic) Instances() []digest.Digest { - results := make([]digest.Digest, len(index.Manifests)) - for i, m := range index.Manifests { - results[i] = m.Digest - } - return results -} - -// Instance returns the ListUpdate of a particular instance in the index. 
-func (index *OCI1IndexPublic) Instance(instanceDigest digest.Digest) (ListUpdate, error) { - for _, manifest := range index.Manifests { - if manifest.Digest == instanceDigest { - ret := ListUpdate{ - Digest: manifest.Digest, - Size: manifest.Size, - MediaType: manifest.MediaType, - } - ret.ReadOnly.Platform = manifest.Platform - ret.ReadOnly.Annotations = manifest.Annotations - ret.ReadOnly.CompressionAlgorithmNames = annotationsToCompressionAlgorithmNames(manifest.Annotations) - ret.ReadOnly.ArtifactType = manifest.ArtifactType - return ret, nil - } - } - return ListUpdate{}, fmt.Errorf("unable to find instance %s in OCI1Index", instanceDigest) -} - -// UpdateInstances updates the sizes, digests, and media types of the manifests -// which the list catalogs. -func (index *OCI1IndexPublic) UpdateInstances(updates []ListUpdate) error { - editInstances := []ListEdit{} - for i, instance := range updates { - editInstances = append(editInstances, ListEdit{ - UpdateOldDigest: index.Manifests[i].Digest, - UpdateDigest: instance.Digest, - UpdateSize: instance.Size, - UpdateMediaType: instance.MediaType, - ListOperation: ListOpUpdate}) - } - return index.editInstances(editInstances) -} - -func annotationsToCompressionAlgorithmNames(annotations map[string]string) []string { - result := make([]string, 0, 1) - if annotations[OCI1InstanceAnnotationCompressionZSTD] == OCI1InstanceAnnotationCompressionZSTDValue { - result = append(result, compression.ZstdAlgorithmName) - } - // No compression was detected, hence assume instance has default compression `Gzip` - if len(result) == 0 { - result = append(result, compression.GzipAlgorithmName) - } - return result -} - -func addCompressionAnnotations(compressionAlgorithms []compression.Algorithm, annotationsMap *map[string]string) { - // TODO: This should also delete the algorithm if map already contains an algorithm and compressionAlgorithm - // list has a different algorithm. To do that, we would need to modify the callers to always provide a reliable - // and full compressionAlgorithms list.
- if *annotationsMap == nil && len(compressionAlgorithms) > 0 { - *annotationsMap = map[string]string{} - } - for _, algo := range compressionAlgorithms { - switch algo.BaseVariantName() { - case compression.ZstdAlgorithmName: - (*annotationsMap)[OCI1InstanceAnnotationCompressionZSTD] = OCI1InstanceAnnotationCompressionZSTDValue - default: - continue - } - } -} - -func (index *OCI1IndexPublic) editInstances(editInstances []ListEdit) error { - addedEntries := []imgspecv1.Descriptor{} - updatedAnnotations := false - for i, editInstance := range editInstances { - switch editInstance.ListOperation { - case ListOpUpdate: - if err := editInstance.UpdateOldDigest.Validate(); err != nil { - return fmt.Errorf("OCI1Index.EditInstances: Attempting to update %s which is an invalid digest: %w", editInstance.UpdateOldDigest, err) - } - if err := editInstance.UpdateDigest.Validate(); err != nil { - return fmt.Errorf("OCI1Index.EditInstances: Modified digest %s is an invalid digest: %w", editInstance.UpdateDigest, err) - } - targetIndex := slices.IndexFunc(index.Manifests, func(m imgspecv1.Descriptor) bool { - return m.Digest == editInstance.UpdateOldDigest - }) - if targetIndex == -1 { - return fmt.Errorf("OCI1Index.EditInstances: digest %s not found", editInstance.UpdateOldDigest) - } - index.Manifests[targetIndex].Digest = editInstance.UpdateDigest - if editInstance.UpdateSize < 0 { - return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had an invalid size (%d)", i+1, len(editInstances), editInstance.UpdateSize) - } - index.Manifests[targetIndex].Size = editInstance.UpdateSize - if editInstance.UpdateMediaType == "" { - return fmt.Errorf("update %d of %d passed to OCI1Index.UpdateInstances had no media type (was %q)", i+1, len(editInstances), index.Manifests[i].MediaType) - } - index.Manifests[targetIndex].MediaType = editInstance.UpdateMediaType - if editInstance.UpdateAnnotations != nil { - updatedAnnotations = true - if editInstance.UpdateAffectAnnotations { - index.Manifests[targetIndex].Annotations = maps.Clone(editInstance.UpdateAnnotations) - } else { - if index.Manifests[targetIndex].Annotations == nil { - index.Manifests[targetIndex].Annotations = map[string]string{} - } - maps.Copy(index.Manifests[targetIndex].Annotations, editInstance.UpdateAnnotations) - } - } - addCompressionAnnotations(editInstance.UpdateCompressionAlgorithms, &index.Manifests[targetIndex].Annotations) - case ListOpAdd: - annotations := map[string]string{} - if editInstance.AddAnnotations != nil { - annotations = maps.Clone(editInstance.AddAnnotations) - } - addCompressionAnnotations(editInstance.AddCompressionAlgorithms, &annotations) - addedEntries = append(addedEntries, imgspecv1.Descriptor{ - MediaType: editInstance.AddMediaType, - ArtifactType: editInstance.AddArtifactType, - Size: editInstance.AddSize, - Digest: editInstance.AddDigest, - Platform: editInstance.AddPlatform, - Annotations: annotations, - }) - default: - return fmt.Errorf("internal error: invalid operation: %d", editInstance.ListOperation) - } - } - if len(addedEntries) != 0 { - // slices.Clone() here to ensure the slice uses a private backing array; - // an external caller could have manually created OCI1IndexPublic with a slice with extra capacity. - index.Manifests = append(slices.Clone(index.Manifests), addedEntries...) - } - if len(addedEntries) != 0 || updatedAnnotations { - slices.SortStableFunc(index.Manifests, func(a, b imgspecv1.Descriptor) int { - // FIXME? 
With Go 1.21 and cmp.Compare available, turn instanceIsZstd into an integer score that can be compared, and generalizes - // into more algorithms? - aZstd := instanceIsZstd(a) - bZstd := instanceIsZstd(b) - switch { - case aZstd == bZstd: - return 0 - case !aZstd: // Implies bZstd - return -1 - default: // aZstd && !bZstd - return 1 - } - }) - } - return nil -} - -func (index *OCI1Index) EditInstances(editInstances []ListEdit) error { - return index.editInstances(editInstances) -} - -// instanceIsZstd returns true if instance is a zstd instance otherwise false. -func instanceIsZstd(manifest imgspecv1.Descriptor) bool { - if value, ok := manifest.Annotations[OCI1InstanceAnnotationCompressionZSTD]; ok && value == "true" { - return true - } - return false -} - -type instanceCandidate struct { - platformIndex int // Index of the candidate in platform.WantedPlatforms: lower numbers are preferred; or math.maxInt if the candidate doesn’t have a platform - isZstd bool // tells if particular instance if zstd instance - manifestPosition int // A zero-based index of the instance in the manifest list - digest digest.Digest // Instance digest -} - -func (ic instanceCandidate) isPreferredOver(other *instanceCandidate, preferGzip types.OptionalBool) bool { - switch { - case ic.platformIndex != other.platformIndex: - return ic.platformIndex < other.platformIndex - case ic.isZstd != other.isZstd: - if preferGzip != types.OptionalBoolTrue { - return ic.isZstd - } else { - return !ic.isZstd - } - case ic.manifestPosition != other.manifestPosition: - return ic.manifestPosition < other.manifestPosition - } - panic("internal error: invalid comparison between two candidates") // This should not be reachable because in all calls we make, the two candidates differ at least in manifestPosition. -} - -// chooseInstance is a private equivalent to ChooseInstanceByCompression, -// shared by ChooseInstance and ChooseInstanceByCompression. -func (index *OCI1IndexPublic) chooseInstance(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) { - wantedPlatforms := platform.WantedPlatforms(ctx) - var bestMatch *instanceCandidate - bestMatch = nil - for manifestIndex, d := range index.Manifests { - candidate := instanceCandidate{platformIndex: math.MaxInt, manifestPosition: manifestIndex, isZstd: instanceIsZstd(d), digest: d.Digest} - if d.Platform != nil { - imagePlatform := ociPlatformClone(*d.Platform) - platformIndex := slices.IndexFunc(wantedPlatforms, func(wantedPlatform imgspecv1.Platform) bool { - return platform.MatchesPlatform(imagePlatform, wantedPlatform) - }) - if platformIndex == -1 { - continue - } - candidate.platformIndex = platformIndex - } - if bestMatch == nil || candidate.isPreferredOver(bestMatch, preferGzip) { - bestMatch = &candidate - } - } - if bestMatch != nil { - return bestMatch.digest, nil - } - return "", fmt.Errorf("no image found in image index for architecture %q, variant %q, OS %q", wantedPlatforms[0].Architecture, wantedPlatforms[0].Variant, wantedPlatforms[0].OS) -} - -func (index *OCI1Index) ChooseInstanceByCompression(ctx *types.SystemContext, preferGzip types.OptionalBool) (digest.Digest, error) { - return index.chooseInstance(ctx, preferGzip) -} - -// ChooseInstance parses blob as an oci v1 manifest index, and returns the digest -// of the image which is appropriate for the current environment. 
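// Aside: a sketch of driving the instance selection below through the public manifest package, assuming the post-migration import path (pickInstance is a hypothetical helper):
//
//	import (
//		"github.com/opencontainers/go-digest"
//
//		"go.podman.io/image/v5/manifest"
//		"go.podman.io/image/v5/types"
//	)
//
//	func pickInstance(indexBlob []byte) (digest.Digest, error) {
//		index, err := manifest.OCI1IndexFromManifest(indexBlob)
//		if err != nil {
//			return "", err
//		}
//		// A nil SystemContext selects for the current platform; OSChoice,
//		// ArchitectureChoice, and VariantChoice override the detection.
//		return index.ChooseInstance(&types.SystemContext{OSChoice: "linux", ArchitectureChoice: "arm64"})
//	}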
-func (index *OCI1IndexPublic) ChooseInstance(ctx *types.SystemContext) (digest.Digest, error) { - return index.chooseInstance(ctx, types.OptionalBoolFalse) -} - -// Serialize returns the index in a blob format. -// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! -func (index *OCI1IndexPublic) Serialize() ([]byte, error) { - buf, err := json.Marshal(index) - if err != nil { - return nil, fmt.Errorf("marshaling OCI1Index %#v: %w", index, err) - } - return buf, nil -} - -// OCI1IndexPublicFromComponents creates an OCI1 image index instance from the -// supplied data. -// This is publicly visible as c/image/manifest.OCI1IndexFromComponents. -func OCI1IndexPublicFromComponents(components []imgspecv1.Descriptor, annotations map[string]string) *OCI1IndexPublic { - index := OCI1IndexPublic{ - imgspecv1.Index{ - Versioned: imgspec.Versioned{SchemaVersion: 2}, - MediaType: imgspecv1.MediaTypeImageIndex, - Manifests: make([]imgspecv1.Descriptor, len(components)), - Annotations: maps.Clone(annotations), - }, - } - for i, component := range components { - index.Manifests[i] = oci1DescriptorClone(component) - } - return &index -} - -func oci1DescriptorClone(d imgspecv1.Descriptor) imgspecv1.Descriptor { - var platform *imgspecv1.Platform - if d.Platform != nil { - platformCopy := ociPlatformClone(*d.Platform) - platform = &platformCopy - } - return imgspecv1.Descriptor{ - MediaType: d.MediaType, - Digest: d.Digest, - Size: d.Size, - URLs: slices.Clone(d.URLs), - Annotations: maps.Clone(d.Annotations), - Data: bytes.Clone(d.Data), - Platform: platform, - ArtifactType: d.ArtifactType, - } -} - -// OCI1IndexPublicClone creates a deep copy of the passed-in index. -// This is publicly visible as c/image/manifest.OCI1IndexClone. -func OCI1IndexPublicClone(index *OCI1IndexPublic) *OCI1IndexPublic { - var subject *imgspecv1.Descriptor - if index.Subject != nil { - s := oci1DescriptorClone(*index.Subject) - subject = &s - } - manifests := make([]imgspecv1.Descriptor, len(index.Manifests)) - for i, m := range index.Manifests { - manifests[i] = oci1DescriptorClone(m) - } - return &OCI1IndexPublic{ - Index: imgspecv1.Index{ - Versioned: index.Versioned, - MediaType: index.MediaType, - ArtifactType: index.ArtifactType, - Manifests: manifests, - Subject: subject, - Annotations: maps.Clone(index.Annotations), - }, - } -} - -// ToOCI1Index returns the index encoded as an OCI1 index. -func (index *OCI1IndexPublic) ToOCI1Index() (*OCI1IndexPublic, error) { - return OCI1IndexPublicClone(index), nil -} - -// ToSchema2List returns the index encoded as a Schema2 list. -func (index *OCI1IndexPublic) ToSchema2List() (*Schema2ListPublic, error) { - components := make([]Schema2ManifestDescriptor, 0, len(index.Manifests)) - for _, manifest := range index.Manifests { - platform := manifest.Platform - if platform == nil { - platform = &imgspecv1.Platform{ - OS: runtime.GOOS, - Architecture: runtime.GOARCH, - } - } - components = append(components, Schema2ManifestDescriptor{ - Schema2Descriptor{ - MediaType: manifest.MediaType, - Size: manifest.Size, - Digest: manifest.Digest, - URLs: slices.Clone(manifest.URLs), - }, - schema2PlatformSpecFromOCIPlatform(*platform), - }) - } - s2 := Schema2ListPublicFromComponents(components) - return s2, nil -} - -// OCI1IndexPublicFromManifest creates an OCI1 manifest index instance from marshalled -// JSON, presumably generated by encoding a OCI1 manifest index. 
-// This is publicly visible as c/image/manifest.OCI1IndexFromManifest. -func OCI1IndexPublicFromManifest(manifest []byte) (*OCI1IndexPublic, error) { - index := OCI1IndexPublic{ - Index: imgspecv1.Index{ - Versioned: imgspec.Versioned{SchemaVersion: 2}, - MediaType: imgspecv1.MediaTypeImageIndex, - Manifests: []imgspecv1.Descriptor{}, - Annotations: make(map[string]string), - }, - } - if err := json.Unmarshal(manifest, &index); err != nil { - return nil, fmt.Errorf("unmarshaling OCI1Index %q: %w", string(manifest), err) - } - if err := ValidateUnambiguousManifestFormat(manifest, imgspecv1.MediaTypeImageIndex, - AllowedFieldManifests); err != nil { - return nil, err - } - return &index, nil -} - -// Clone returns a deep copy of this list and its contents. -func (index *OCI1IndexPublic) Clone() ListPublic { - return OCI1IndexPublicClone(index) -} - -// ConvertToMIMEType converts the passed-in image index to a manifest list of -// the specified type. -func (index *OCI1IndexPublic) ConvertToMIMEType(manifestMIMEType string) (ListPublic, error) { - switch normalized := NormalizedMIMEType(manifestMIMEType); normalized { - case DockerV2ListMediaType: - return index.ToSchema2List() - case imgspecv1.MediaTypeImageIndex: - return index.Clone(), nil - case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, imgspecv1.MediaTypeImageManifest, DockerV2Schema2MediaType: - return nil, fmt.Errorf("Can not convert image index to MIME type %q, which is not a list type", manifestMIMEType) - default: - // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values. - return nil, fmt.Errorf("Unimplemented manifest MIME type %s", manifestMIMEType) - } -} - -type OCI1Index struct { - OCI1IndexPublic -} - -func oci1IndexFromPublic(public *OCI1IndexPublic) *OCI1Index { - return &OCI1Index{*public} -} - -func (index *OCI1Index) CloneInternal() List { - return oci1IndexFromPublic(OCI1IndexPublicClone(&index.OCI1IndexPublic)) -} - -func (index *OCI1Index) Clone() ListPublic { - return index.CloneInternal() -} - -// OCI1IndexFromManifest creates a OCI1 manifest list instance from marshalled -// JSON, presumably generated by encoding a OCI1 manifest list. -func OCI1IndexFromManifest(manifest []byte) (*OCI1Index, error) { - public, err := OCI1IndexPublicFromManifest(manifest) - if err != nil { - return nil, err - } - return oci1IndexFromPublic(public), nil -} - -// ociPlatformClone returns an independent copy of p. -func ociPlatformClone(p imgspecv1.Platform) imgspecv1.Platform { - // The only practical way in Go to give read-only access to an array is to copy it. - // The only practical way in Go to copy a deep structure is to either do it manually field by field, - // or to use reflection (incl. a round-trip through JSON, which uses reflection). - // - // The combination of the two is just sad, and leads to code like this, which will - // need to be updated with every new Platform field. - return imgspecv1.Platform{ - Architecture: p.Architecture, - OS: p.OS, - OSVersion: p.OSVersion, - OSFeatures: slices.Clone(p.OSFeatures), - Variant: p.Variant, - } -} - -// schema2PlatformSpecFromOCIPlatform converts an OCI platform p to the schema2 structure. 
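// Aside: ToSchema2List and Clone above back the public List.ConvertToMIMEType; a sketch of converting an OCI index into a Docker schema2 manifest list via the public manifest package (toSchema2List is a hypothetical helper):
//
//	func toSchema2List(index manifest.List) ([]byte, error) {
//		converted, err := index.ConvertToMIMEType(manifest.DockerV2ListMediaType)
//		if err != nil {
//			return nil, err
//		}
//		return converted.Serialize()
//	}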
-func schema2PlatformSpecFromOCIPlatform(p imgspecv1.Platform) Schema2PlatformSpec { - return Schema2PlatformSpec{ - Architecture: p.Architecture, - OS: p.OS, - OSVersion: p.OSVersion, - OSFeatures: slices.Clone(p.OSFeatures), - Variant: p.Variant, - Features: nil, - } -} diff --git a/vendor/github.com/containers/image/v5/internal/multierr/multierr.go b/vendor/github.com/containers/image/v5/internal/multierr/multierr.go deleted file mode 100644 index 1341925c1d..0000000000 --- a/vendor/github.com/containers/image/v5/internal/multierr/multierr.go +++ /dev/null @@ -1,34 +0,0 @@ -package multierr - -import ( - "fmt" - "strings" -) - -// Format creates an error value from the input array (which should not be empty) -// If the input contains a single error value, it is returned as is. -// If there are multiple, they are formatted as a multi-error (with Unwrap() []error) with the provided initial, separator, and ending strings. -// -// Typical usage: -// -// var errs []error -// // … -// errs = append(errs, …) -// // … -// if errs != nil { return multierr.Format("Failures doing $FOO", "\n* ", "", errs)} -func Format(first, middle, last string, errs []error) error { - switch len(errs) { - case 0: - return fmt.Errorf("internal error: multierr.Format called with 0 errors") - case 1: - return errs[0] - default: - // We have to do this — and this function only really exists — because fmt.Errorf(format, errs...) is invalid: - // []error is not a valid parameter to a function expecting []any - anyErrs := make([]any, 0, len(errs)) - for _, e := range errs { - anyErrs = append(anyErrs, e) - } - return fmt.Errorf(first+"%w"+strings.Repeat(middle+"%w", len(errs)-1)+last, anyErrs...) - } -} diff --git a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go b/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go deleted file mode 100644 index 3a16dad637..0000000000 --- a/vendor/github.com/containers/image/v5/internal/pkg/platform/platform_matcher.go +++ /dev/null @@ -1,223 +0,0 @@ -package platform - -// Largely based on -// https://github.com/moby/moby/blob/bc846d2e8fe5538220e0c31e9d0e8446f6fbc022/distribution/cpuinfo_unix.go -// Copyright 2012-2017 Docker, Inc. -// -// https://github.com/containerd/containerd/blob/726dcaea50883e51b2ec6db13caff0e7936b711d/platforms/cpuinfo.go -// Copyright The containerd Authors. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// https://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -import ( - "bufio" - "fmt" - "os" - "runtime" - "slices" - "strings" - - "github.com/containers/image/v5/types" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" - "github.com/sirupsen/logrus" -) - -// For Linux, the kernel has already detected the ABI, ISA and Features. -// So we don't need to access the ARM registers to detect platform information -// by ourselves. 
We can just parse these information from /proc/cpuinfo -func getCPUInfo(pattern string) (info string, err error) { - if runtime.GOOS != "linux" { - return "", fmt.Errorf("getCPUInfo for OS %s not implemented", runtime.GOOS) - } - - cpuinfo, err := os.Open("/proc/cpuinfo") - if err != nil { - return "", err - } - defer cpuinfo.Close() - - // Start to Parse the Cpuinfo line by line. For SMP SoC, we parse - // the first core is enough. - scanner := bufio.NewScanner(cpuinfo) - for scanner.Scan() { - newline := scanner.Text() - list := strings.Split(newline, ":") - - if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) { - return strings.TrimSpace(list[1]), nil - } - } - - // Check whether the scanner encountered errors - err = scanner.Err() - if err != nil { - return "", err - } - - return "", fmt.Errorf("getCPUInfo for pattern: %s not found", pattern) -} - -func getCPUVariantDarwinWindows(arch string) string { - // Darwin and Windows only support v7 for ARM32 and v8 for ARM64 and so we can use - // runtime.GOARCH to determine the variants - var variant string - switch arch { - case "arm64": - variant = "v8" - case "arm": - variant = "v7" - default: - variant = "" - } - - return variant -} - -func getCPUVariantArm() string { - variant, err := getCPUInfo("Cpu architecture") - if err != nil { - logrus.Errorf("Couldn't get cpu architecture: %v", err) - return "" - } - - switch strings.ToLower(variant) { - case "8", "aarch64": - variant = "v8" - case "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)": - variant = "v7" - case "7": - // handle RPi Zero variant mismatch due to wrong variant from kernel - // https://github.com/containerd/containerd/pull/4530 - // https://www.raspberrypi.org/forums/viewtopic.php?t=12614 - // https://github.com/moby/moby/pull/36121#issuecomment-398328286 - model, err := getCPUInfo("model name") - if err != nil { - logrus.Errorf("Couldn't get cpu model name, it may be the corner case where variant is 6: %v", err) - return "" - } - // model name is NOT a value provided by the CPU; it is another outcome of Linux CPU detection, - // https://github.com/torvalds/linux/blob/190bf7b14b0cf3df19c059061be032bd8994a597/arch/arm/mm/proc-v6.S#L178C35-L178C35 - // (matching happens based on value + mask at https://github.com/torvalds/linux/blob/190bf7b14b0cf3df19c059061be032bd8994a597/arch/arm/mm/proc-v6.S#L273-L274 ) - // ARM CPU ID starts with a “main” ID register https://developer.arm.com/documentation/ddi0406/cb/System-Level-Architecture/System-Control-Registers-in-a-VMSA-implementation/VMSA-System-control-registers-descriptions--in-register-order/MIDR--Main-ID-Register--VMSA?lang=en , - // but the ARMv6/ARMv7 differences are not a single dimension, https://developer.arm.com/documentation/ddi0406/cb/System-Level-Architecture/The-CPUID-Identification-Scheme?lang=en . - // The Linux "cpu architecture" is determined by a “memory model” feature. - // - // So, the "armv6-compatible" check basically checks for a "v6 or v7 CPU, but not one found listed as a known v7 one in the .proc.info.init tables of - // https://github.com/torvalds/linux/blob/190bf7b14b0cf3df19c059061be032bd8994a597/arch/arm/mm/proc-v7.S . 
- if strings.HasPrefix(strings.ToLower(model), "armv6-compatible") { - logrus.Debugf("Detected corner case, setting cpu variant to v6") - variant = "v6" - } else { - variant = "v7" - } - case "6", "6tej": - variant = "v6" - case "5", "5t", "5te", "5tej": - variant = "v5" - case "4", "4t": - variant = "v4" - case "3": - variant = "v3" - default: - variant = "" - } - - return variant -} - -func getCPUVariant(os string, arch string) string { - if os == "darwin" || os == "windows" { - return getCPUVariantDarwinWindows(arch) - } - if arch == "arm" || arch == "arm64" { - return getCPUVariantArm() - } - return "" -} - -// compatibility contains, for a specified architecture, a list of known variants, in the -// order from most capable (most restrictive) to least capable (most compatible). -// Architectures that don’t have variants should not have an entry here. -var compatibility = map[string][]string{ - "arm": {"v8", "v7", "v6", "v5"}, - "arm64": {"v8"}, -} - -// WantedPlatforms returns all compatible platforms with the platform specifics possibly overridden by user, -// the most compatible platform is first. -// If some option (arch, os, variant) is not present, a value from current platform is detected. -func WantedPlatforms(ctx *types.SystemContext) []imgspecv1.Platform { - // Note that this does not use Platform.OSFeatures and Platform.OSVersion at all. - // The fields are not specified by the OCI specification, as of version 1.1, usefully enough - // to be interoperable, anyway. - - wantedArch := runtime.GOARCH - wantedVariant := "" - if ctx != nil && ctx.ArchitectureChoice != "" { - wantedArch = ctx.ArchitectureChoice - } else { - // Only auto-detect the variant if we are using the default architecture. - // If the user has specified the ArchitectureChoice, don't autodetect, even if - // ctx.ArchitectureChoice == runtime.GOARCH, because we have no idea whether the runtime.GOARCH - // value is relevant to the use case, and if we do autodetect a variant, - // ctx.VariantChoice can't be used to override it back to "". - wantedVariant = getCPUVariant(runtime.GOOS, runtime.GOARCH) - } - if ctx != nil && ctx.VariantChoice != "" { - wantedVariant = ctx.VariantChoice - } - - wantedOS := runtime.GOOS - if ctx != nil && ctx.OSChoice != "" { - wantedOS = ctx.OSChoice - } - - var variants []string = nil - if wantedVariant != "" { - // If the user requested a specific variant, we'll walk down - // the list from most to least compatible. - if variantOrder := compatibility[wantedArch]; variantOrder != nil { - if i := slices.Index(variantOrder, wantedVariant); i != -1 { - variants = variantOrder[i:] - } - } - if variants == nil { - // user wants a variant which we know nothing about - not even compatibility - variants = []string{wantedVariant} - } - // Make sure to have a candidate with an empty variant as well. - variants = append(variants, "") - } else { - // Make sure to have a candidate with an empty variant as well. - variants = append(variants, "") - // If available add the entire compatibility matrix for the specific architecture. - if possibleVariants, ok := compatibility[wantedArch]; ok { - variants = append(variants, possibleVariants...) - } - } - - res := make([]imgspecv1.Platform, 0, len(variants)) - for _, v := range variants { - res = append(res, imgspecv1.Platform{ - OS: wantedOS, - Architecture: wantedArch, - Variant: v, - }) - } - return res -} - -// MatchesPlatform returns true if a platform descriptor from a multi-arch image matches -// an item from the return value of WantedPlatforms. 
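// Worked example for WantedPlatforms above: on linux/arm with a CPU reporting
// variant v7 and a nil SystemContext, the returned order of preference is
//
//	{OS: "linux", Architecture: "arm", Variant: "v7"}
//	{OS: "linux", Architecture: "arm", Variant: "v6"}
//	{OS: "linux", Architecture: "arm", Variant: "v5"}
//	{OS: "linux", Architecture: "arm", Variant: ""}
//
// i.e. the compatibility list is walked from the detected variant down to the
// most compatible one, with a variant-less candidate appended last.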
-func MatchesPlatform(image imgspecv1.Platform, wanted imgspecv1.Platform) bool { - return image.Architecture == wanted.Architecture && - image.OS == wanted.OS && - image.Variant == wanted.Variant -} diff --git a/vendor/github.com/containers/image/v5/internal/private/private.go b/vendor/github.com/containers/image/v5/internal/private/private.go deleted file mode 100644 index ae0cbdf220..0000000000 --- a/vendor/github.com/containers/image/v5/internal/private/private.go +++ /dev/null @@ -1,239 +0,0 @@ -package private - -import ( - "context" - "io" - "time" - - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/internal/blobinfocache" - "github.com/containers/image/v5/internal/signature" - compression "github.com/containers/image/v5/pkg/compression/types" - "github.com/containers/image/v5/types" - "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" -) - -// ImageSourceInternalOnly is the part of private.ImageSource that is not -// a part of types.ImageSource. -type ImageSourceInternalOnly interface { - // SupportsGetBlobAt() returns true if GetBlobAt (BlobChunkAccessor) is supported. - SupportsGetBlobAt() bool - // BlobChunkAccessor.GetBlobAt is available only if SupportsGetBlobAt(). - BlobChunkAccessor - - // GetSignaturesWithFormat returns the image's signatures. It may use a remote (= slow) service. - // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for - // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list - // (e.g. if the source never returns manifest lists). - GetSignaturesWithFormat(ctx context.Context, instanceDigest *digest.Digest) ([]signature.Signature, error) -} - -// ImageSource is an internal extension to the types.ImageSource interface. -type ImageSource interface { - types.ImageSource - ImageSourceInternalOnly -} - -// ImageDestinationInternalOnly is the part of private.ImageDestination that is not -// a part of types.ImageDestination. -type ImageDestinationInternalOnly interface { - // SupportsPutBlobPartial returns true if PutBlobPartial is supported. - SupportsPutBlobPartial() bool - // FIXME: Add SupportsSignaturesWithFormat or something like that, to allow early failures - // on unsupported formats. - - // NoteOriginalOCIConfig provides the config of the image, as it exists on the source, BUT converted to OCI format, - // or an error obtaining that value (e.g. if the image is an artifact and not a container image). - // The destination can use it in its TryReusingBlob/PutBlob implementations - // (otherwise it only obtains the final config after all layers are written). - NoteOriginalOCIConfig(ociConfig *imgspecv1.Image, configErr error) error - - // PutBlobWithOptions writes contents of stream and returns data representing the result. - // inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents. - // inputInfo.Size is the expected length of stream, if known. - // inputInfo.MediaType describes the blob format, if known. - // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available - // to any other readers for download using the supplied digest. 
- // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlobWithOptions MUST 1) fail, and 2) delete any data stored so far. - PutBlobWithOptions(ctx context.Context, stream io.Reader, inputInfo types.BlobInfo, options PutBlobOptions) (UploadedBlob, error) - - // PutBlobPartial attempts to create a blob using the data that is already present - // at the destination. chunkAccessor is accessed in a non-sequential way to retrieve the missing chunks. - // It is available only if SupportsPutBlobPartial(). - // Even if SupportsPutBlobPartial() returns true, the call can fail. - // If the call fails with ErrFallbackToOrdinaryLayerDownload, the caller can fall back to PutBlobWithOptions. - // The fallback _must not_ be done otherwise. - PutBlobPartial(ctx context.Context, chunkAccessor BlobChunkAccessor, srcInfo types.BlobInfo, options PutBlobPartialOptions) (UploadedBlob, error) - - // TryReusingBlobWithOptions checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination - // (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree). - // info.Digest must not be empty. - // If the blob has been successfully reused, returns (true, info, nil). - // If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure. - TryReusingBlobWithOptions(ctx context.Context, info types.BlobInfo, options TryReusingBlobOptions) (bool, ReusedBlob, error) - - // PutSignaturesWithFormat writes a set of signatures to the destination. - // If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for - // (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list. - // MUST be called after PutManifest (signatures may reference manifest contents). - PutSignaturesWithFormat(ctx context.Context, signatures []signature.Signature, instanceDigest *digest.Digest) error - - // CommitWithOptions marks the process of storing the image as successful and asks for the image to be persisted. - // WARNING: This does not have any transactional semantics: - // - Uploaded data MAY be visible to others before CommitWithOptions() is called - // - Uploaded data MAY be removed or MAY remain around if Close() is called without CommitWithOptions() (i.e. rollback is allowed but not guaranteed) - CommitWithOptions(ctx context.Context, options CommitOptions) error -} - -// ImageDestination is an internal extension to the types.ImageDestination -// interface. -type ImageDestination interface { - types.ImageDestination - ImageDestinationInternalOnly -} - -// UploadedBlob is information about a blob written to a destination. -// It is the subset of types.BlobInfo fields the transport is responsible for setting; all fields must be provided. -type UploadedBlob struct { - Digest digest.Digest - Size int64 -} - -// PutBlobOptions are used in PutBlobWithOptions. -type PutBlobOptions struct { - Cache blobinfocache.BlobInfoCache2 // Cache to optionally update with the uploaded blob. - IsConfig bool // True if the blob is a config - - // The following fields are new to internal/private. Users of internal/private MUST fill them in, - // but they also must expect that they will be ignored by types.ImageDestination transports.
- // Transports, OTOH, MUST support these fields being zero-valued for types.ImageDestination callers - // if they use internal/imagedestination/impl.Compat; - // in that case, they will all be consistently zero-valued. - - EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented. - LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise. -} - -// PutBlobPartialOptions are used in PutBlobPartial. -type PutBlobPartialOptions struct { - Cache blobinfocache.BlobInfoCache2 // Cache to use and/or update. - EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented. - LayerIndex int // A zero-based index of the layer within the image (PutBlobPartial is only called with layer-like blobs, not configs) -} - -// TryReusingBlobOptions are used in TryReusingBlobWithOptions. -type TryReusingBlobOptions struct { - Cache blobinfocache.BlobInfoCache2 // Cache to use and/or update. - // If true, it is allowed to use an equivalent of the desired blob; - // in that case the returned info may not match the input. - CanSubstitute bool - - // The following fields are new to internal/private. Users of internal/private MUST fill them in, - // but they also must expect that they will be ignored by types.ImageDestination transports. - // Transports, OTOH, MUST support these fields being zero-valued for types.ImageDestination callers - // if they use internal/imagedestination/impl.Compat; - // in that case, they will all be consistently zero-valued. - EmptyLayer bool // True if the blob is an "empty"/"throwaway" layer, and may not necessarily be physically represented. - LayerIndex *int // If the blob is a layer, a zero-based index of the layer within the image; nil otherwise. - SrcRef reference.Named // A reference to the source image that contains the input blob. - PossibleManifestFormats []string // A set of possible manifest formats; at least one should support the reused layer blob. - RequiredCompression *compression.Algorithm // If set, reuse blobs with a matching algorithm as per implementations in internal/imagedestination/impl.helpers.go - OriginalCompression *compression.Algorithm // May be nil to indicate “uncompressed” or “unknown”. - TOCDigest digest.Digest // If specified, the blob can be looked up in the destination also by its TOC digest. -} - -// ReusedBlob is information about a blob reused in a destination. -// It is the subset of types.BlobInfo fields the transport is responsible for setting. -type ReusedBlob struct { - Digest digest.Digest // Must be provided - Size int64 // Must be provided - // The following compression fields should be set when the reuse substitutes - // a differently-compressed blob. - // They may be set also to change from a base variant to a specific variant of an algorithm. - CompressionOperation types.LayerCompression // Compress/Decompress, matching the reused blob; PreserveOriginal if N/A - CompressionAlgorithm *compression.Algorithm // Algorithm if compressed, nil if decompressed or N/A - - // Annotations that should be added, for CompressionAlgorithm. Note that they might need to be - // added even if the digest doesn’t change (if we found the annotations in a cache). - CompressionAnnotations map[string]string - - MatchedByTOCDigest bool // Whether the layer was reused/matched by TOC digest. Used only for UI purposes. 
-} - -// CommitOptions are used in CommitWithOptions -type CommitOptions struct { - // UnparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list - // if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the - // original manifest list digest, if desired. - UnparsedToplevel types.UnparsedImage - // ReportResolvedReference, if set, asks the transport to store a “resolved” (more detailed) reference to the created image - // into the value this option points to. - // What “resolved” means is transport-specific. - // Transports which don’t support reporting resolved references can ignore the field; the generic copy code writes "nil" into the value. - ReportResolvedReference *types.ImageReference - // Timestamp, if set, will force timestamps of content created in the destination to this value. - // Most transports don't support this. - // - // In oci-archive: destinations, this will set the create/mod/access timestamps in each tar entry - // (but not a timestamp of the created archive file). - Timestamp *time.Time -} - -// ImageSourceChunk is a portion of a blob. -// This API is experimental and can be changed without bumping the major version number. -type ImageSourceChunk struct { - // Offset specifies the starting position of the chunk within the source blob. - Offset uint64 - - // Length specifies the size of the chunk. If it is set to math.MaxUint64, - // then it refers to all the data from Offset to the end of the blob. - Length uint64 -} - -// BlobChunkAccessor allows fetching discontiguous chunks of a blob. -type BlobChunkAccessor interface { - // GetBlobAt returns a sequential channel of readers that contain data for the requested - // blob chunks, and a channel that might get a single error value. - // The specified chunks must be not overlapping and sorted by their offset. - // The readers must be fully consumed, in the order they are returned, before blocking - // to read the next chunk. - // If the Length for the last chunk is set to math.MaxUint64, then it - // fully fetches the remaining data from the offset to the end of the blob. - GetBlobAt(ctx context.Context, info types.BlobInfo, chunks []ImageSourceChunk) (chan io.ReadCloser, chan error, error) -} - -// BadPartialRequestError is returned by BlobChunkAccessor.GetBlobAt on an invalid request. -type BadPartialRequestError struct { - Status string -} - -func (e BadPartialRequestError) Error() string { - return e.Status -} - -// UnparsedImage is an internal extension to the types.UnparsedImage interface. -type UnparsedImage interface { - types.UnparsedImage - // UntrustedSignatures is like ImageSource.GetSignaturesWithFormat, but the result is cached; it is OK to call this however often you need. - UntrustedSignatures(ctx context.Context) ([]signature.Signature, error) -} - -// ErrFallbackToOrdinaryLayerDownload is a custom error type returned by PutBlobPartial. -// It suggests to the caller that a fallback mechanism can be used instead of a hard failure; -// otherwise the caller of PutBlobPartial _must not_ fall back to PutBlob. 
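// Aside: a sketch of the fallback contract for the error type below (internal
// API, illustration only; dest, stream, and the option values are assumed to be
// in scope):
//
//	uploaded, err := dest.PutBlobPartial(ctx, chunkAccessor, srcInfo, partialOptions)
//	if err != nil {
//		var fallback private.ErrFallbackToOrdinaryLayerDownload
//		if !errors.As(err, &fallback) {
//			return err // falling back is permitted only for this error type
//		}
//		// Retry with an ordinary sequential upload of the same blob.
//		uploaded, err = dest.PutBlobWithOptions(ctx, stream, srcInfo, blobOptions)
//	}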
-type ErrFallbackToOrdinaryLayerDownload struct { - err error -} - -func (c ErrFallbackToOrdinaryLayerDownload) Error() string { - return c.err.Error() -} - -func (c ErrFallbackToOrdinaryLayerDownload) Unwrap() error { - return c.err -} - -func NewErrFallbackToOrdinaryLayerDownload(err error) error { - return ErrFallbackToOrdinaryLayerDownload{err: err} -} diff --git a/vendor/github.com/containers/image/v5/internal/putblobdigest/put_blob_digest.go b/vendor/github.com/containers/image/v5/internal/putblobdigest/put_blob_digest.go deleted file mode 100644 index b8d3a7e56d..0000000000 --- a/vendor/github.com/containers/image/v5/internal/putblobdigest/put_blob_digest.go +++ /dev/null @@ -1,57 +0,0 @@ -package putblobdigest - -import ( - "io" - - "github.com/containers/image/v5/types" - "github.com/opencontainers/go-digest" -) - -// Digester computes a digest of the provided stream, if not known yet. -type Digester struct { - knownDigest digest.Digest // Or "" - digester digest.Digester // Or nil -} - -// newDigester initiates computation of a digest.Canonical digest of stream, -// if !validDigest; otherwise it just records knownDigest to be returned later. -// The caller MUST use the returned stream instead of the original value. -func newDigester(stream io.Reader, knownDigest digest.Digest, validDigest bool) (Digester, io.Reader) { - if validDigest { - return Digester{knownDigest: knownDigest}, stream - } else { - res := Digester{ - digester: digest.Canonical.Digester(), - } - stream = io.TeeReader(stream, res.digester.Hash()) - return res, stream - } -} - -// DigestIfUnknown initiates computation of a digest.Canonical digest of stream, -// if no digest is supplied in the provided blobInfo; otherwise blobInfo.Digest will -// be used (accepting any algorithm). -// The caller MUST use the returned stream instead of the original value. -func DigestIfUnknown(stream io.Reader, blobInfo types.BlobInfo) (Digester, io.Reader) { - d := blobInfo.Digest - return newDigester(stream, d, d != "") -} - -// DigestIfCanonicalUnknown initiates computation of a digest.Canonical digest of stream, -// if a digest.Canonical digest is not supplied in the provided blobInfo; -// otherwise blobInfo.Digest will be used. -// The caller MUST use the returned stream instead of the original value. -func DigestIfCanonicalUnknown(stream io.Reader, blobInfo types.BlobInfo) (Digester, io.Reader) { - d := blobInfo.Digest - return newDigester(stream, d, d != "" && d.Algorithm() == digest.Canonical) -} - -// Digest() returns a digest value possibly computed by Digester. -// This must be called only after all of the stream returned by a Digester constructor -// has been successfully read. 
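// Aside: typical use of the Digester above (internal API, sketch only; stream
// and blobInfo are assumed to be in scope):
//
//	digester, wrapped := putblobdigest.DigestIfCanonicalUnknown(stream, blobInfo)
//	// ... read wrapped to EOF, e.g. while uploading the blob ...
//	blobDigest := digester.Digest() // valid only after the full stream was consumed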
-func (d Digester) Digest() digest.Digest { - if d.digester != nil { - return d.digester.Digest() - } - return d.knownDigest -} diff --git a/vendor/github.com/containers/image/v5/internal/rootless/rootless.go b/vendor/github.com/containers/image/v5/internal/rootless/rootless.go deleted file mode 100644 index 80623bfbc1..0000000000 --- a/vendor/github.com/containers/image/v5/internal/rootless/rootless.go +++ /dev/null @@ -1,25 +0,0 @@ -package rootless - -import ( - "os" - "strconv" -) - -// GetRootlessEUID returns the UID of the current user (in the parent userNS, if any) -// -// Podman and similar software, in “rootless” configuration, when run as a non-root -// user, very early switches to a user namespace, where Geteuid() == 0 (but does not -// switch to a limited mount namespace); so, code relying on Geteuid() would use -// system-wide paths in e.g. /var, when the user is actually not privileged to write to -// them, and expects state to be stored in the home directory. -// -// If Podman is setting up such a user namespace, it records the original UID in an -// environment variable, allowing us to make choices based on the actual user’s identity. -func GetRootlessEUID() int { - euidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID") - if euidEnv != "" { - euid, _ := strconv.Atoi(euidEnv) - return euid - } - return os.Geteuid() -} diff --git a/vendor/github.com/containers/image/v5/internal/set/set.go b/vendor/github.com/containers/image/v5/internal/set/set.go deleted file mode 100644 index 7716b12d5b..0000000000 --- a/vendor/github.com/containers/image/v5/internal/set/set.go +++ /dev/null @@ -1,55 +0,0 @@ -package set - -import ( - "iter" - "maps" -) - -// FIXME: -// - Docstrings -// - This should be in a public library somewhere - -type Set[E comparable] struct { - m map[E]struct{} -} - -func New[E comparable]() *Set[E] { - return &Set[E]{ - m: map[E]struct{}{}, - } -} - -func NewWithValues[E comparable](values ...E) *Set[E] { - s := New[E]() - for _, v := range values { - s.Add(v) - } - return s -} - -func (s *Set[E]) Add(v E) { - s.m[v] = struct{}{} // Possibly writing the same struct{}{} presence marker again. -} - -func (s *Set[E]) AddSeq(seq iter.Seq[E]) { - for v := range seq { - s.Add(v) - } -} - -func (s *Set[E]) Delete(v E) { - delete(s.m, v) -} - -func (s *Set[E]) Contains(v E) bool { - _, ok := s.m[v] - return ok -} - -func (s *Set[E]) Empty() bool { - return len(s.m) == 0 -} - -func (s *Set[E]) All() iter.Seq[E] { - return maps.Keys(s.m) -} diff --git a/vendor/github.com/containers/image/v5/internal/signature/signature.go b/vendor/github.com/containers/image/v5/internal/signature/signature.go deleted file mode 100644 index 6f95115a13..0000000000 --- a/vendor/github.com/containers/image/v5/internal/signature/signature.go +++ /dev/null @@ -1,102 +0,0 @@ -package signature - -import ( - "bytes" - "errors" - "fmt" -) - -// FIXME FIXME: MIME type? Int? String? -// An interface with a name, parse methods? -type FormatID string - -const ( - SimpleSigningFormat FormatID = "simple-signing" - SigstoreFormat FormatID = "sigstore-json" - // Update also UnsupportedFormatError below -) - -// Signature is an image signature of some kind. -type Signature interface { - FormatID() FormatID - // blobChunk returns a representation of signature as a []byte, suitable for long-term storage. - // Almost everyone should use signature.Blob() instead. - blobChunk() ([]byte, error) -} - -// Blob returns a representation of sig as a []byte, suitable for long-term storage. 
-func Blob(sig Signature) ([]byte, error) { - chunk, err := sig.blobChunk() - if err != nil { - return nil, err - } - - format := sig.FormatID() - switch format { - case SimpleSigningFormat: - // For compatibility with old dir formats: - return chunk, nil - default: - res := []byte{0} // Start with a zero byte to clearly mark this is a binary format, and disambiguate from random text. - res = append(res, []byte(format)...) - res = append(res, '\n') - res = append(res, chunk...) - return res, nil - } -} - -// FromBlob returns a signature from parsing a blob created by signature.Blob. -func FromBlob(blob []byte) (Signature, error) { - if len(blob) == 0 { - return nil, errors.New("empty signature blob") - } - // Historically we’ve just been using GPG with no identification; try to auto-detect that. - switch blob[0] { - // OpenPGP "compressed data" wrapping the message - case 0xA0, 0xA1, 0xA2, 0xA3, // bit 7 = 1; bit 6 = 0 (old packet format); bits 5…2 = 8 (tag: compressed data packet); bits 1…0 = length-type (any) - 0xC8, // bit 7 = 1; bit 6 = 1 (new packet format); bits 5…0 = 8 (tag: compressed data packet) - // OpenPGP “one-pass signature” starting a signature - 0x90, 0x91, 0x92, 0x3d, // bit 7 = 1; bit 6 = 0 (old packet format); bits 5…2 = 4 (tag: one-pass signature packet); bits 1…0 = length-type (any) - 0xC4, // bit 7 = 1; bit 6 = 1 (new packet format); bits 5…0 = 4 (tag: one-pass signature packet) - // OpenPGP signature packet signing the following data - 0x88, 0x89, 0x8A, 0x8B, // bit 7 = 1; bit 6 = 0 (old packet format); bits 5…2 = 2 (tag: signature packet); bits 1…0 = length-type (any) - 0xC2: // bit 7 = 1; bit 6 = 1 (new packet format); bits 5…0 = 2 (tag: signature packet) - return SimpleSigningFromBlob(blob), nil - - // The newer format: binary 0, format name, newline, data - case 0x00: - blob = blob[1:] - formatBytes, blobChunk, foundNewline := bytes.Cut(blob, []byte{'\n'}) - if !foundNewline { - return nil, fmt.Errorf("invalid signature format, missing newline") - } - for _, b := range formatBytes { - if b < 32 || b >= 0x7F { - return nil, fmt.Errorf("invalid signature format, non-ASCII byte %#x", b) - } - } - switch { - case bytes.Equal(formatBytes, []byte(SimpleSigningFormat)): - return SimpleSigningFromBlob(blobChunk), nil - case bytes.Equal(formatBytes, []byte(SigstoreFormat)): - return sigstoreFromBlobChunk(blobChunk) - default: - return nil, fmt.Errorf("unrecognized signature format %q", string(formatBytes)) - } - - default: - return nil, fmt.Errorf("unrecognized signature format, starting with binary %#x", blob[0]) - } - -} - -// UnsupportedFormatError returns an error complaining about sig having an unsupported format. 
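// Worked example of the framing implemented by Blob()/FromBlob() above: a
// sigstore signature whose JSON chunk is {...} is persisted as
//
//	0x00 's' 'i' 'g' 's' 't' 'o' 'r' 'e' '-' 'j' 's' 'o' 'n' '\n' <JSON chunk>
//
// while a simple-signing signature is persisted as the raw OpenPGP message with
// no prefix at all, which is why FromBlob() has to sniff the first byte before
// choosing a parser.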
-func UnsupportedFormatError(sig Signature) error {
-	formatID := sig.FormatID()
-	switch formatID {
-	case SimpleSigningFormat, SigstoreFormat:
-		return fmt.Errorf("unsupported signature format %s", string(formatID))
-	default:
-		return fmt.Errorf("unsupported, and unrecognized, signature format %q", string(formatID))
-	}
-}
diff --git a/vendor/github.com/containers/image/v5/internal/signature/sigstore.go b/vendor/github.com/containers/image/v5/internal/signature/sigstore.go
deleted file mode 100644
index 8025cd2700..0000000000
--- a/vendor/github.com/containers/image/v5/internal/signature/sigstore.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package signature
-
-import (
-	"bytes"
-	"encoding/json"
-	"maps"
-)
-
-const (
-	// from sigstore/cosign/pkg/types.SimpleSigningMediaType
-	SigstoreSignatureMIMEType = "application/vnd.dev.cosign.simplesigning.v1+json"
-	// from sigstore/cosign/pkg/oci/static.SignatureAnnotationKey
-	SigstoreSignatureAnnotationKey = "dev.cosignproject.cosign/signature"
-	// from sigstore/cosign/pkg/oci/static.BundleAnnotationKey
-	SigstoreSETAnnotationKey = "dev.sigstore.cosign/bundle"
-	// from sigstore/cosign/pkg/oci/static.CertificateAnnotationKey
-	SigstoreCertificateAnnotationKey = "dev.sigstore.cosign/certificate"
-	// from sigstore/cosign/pkg/oci/static.ChainAnnotationKey
-	SigstoreIntermediateCertificateChainAnnotationKey = "dev.sigstore.cosign/chain"
-)
-
-// Sigstore is a github.com/cosign/cosign signature.
-// For the persistent-storage format used for blobChunk(), we want
-// a degree of forward compatibility against unexpected field changes
-// (as has happened before), which is why this data type
-// contains just a payload + annotations (including annotations
-// that we don’t recognize or support), instead of individual fields
-// for the known annotations.
-type Sigstore struct {
-	untrustedMIMEType    string
-	untrustedPayload     []byte
-	untrustedAnnotations map[string]string
-}
-
-// sigstoreJSONRepresentation needs the fields to be public, which we don’t want for
-// the main Sigstore type.
-type sigstoreJSONRepresentation struct {
-	UntrustedMIMEType    string            `json:"mimeType"`
-	UntrustedPayload     []byte            `json:"payload"`
-	UntrustedAnnotations map[string]string `json:"annotations"`
-}
-
-// SigstoreFromComponents returns a Sigstore object from its components.
-func SigstoreFromComponents(untrustedMimeType string, untrustedPayload []byte, untrustedAnnotations map[string]string) Sigstore {
-	return Sigstore{
-		untrustedMIMEType:    untrustedMimeType,
-		untrustedPayload:     bytes.Clone(untrustedPayload),
-		untrustedAnnotations: maps.Clone(untrustedAnnotations),
-	}
-}
-
-// sigstoreFromBlobChunk converts a Sigstore signature, as returned by Sigstore.blobChunk, into a Sigstore object.
-func sigstoreFromBlobChunk(blobChunk []byte) (Sigstore, error) {
-	var v sigstoreJSONRepresentation
-	if err := json.Unmarshal(blobChunk, &v); err != nil {
-		return Sigstore{}, err
-	}
-	return SigstoreFromComponents(v.UntrustedMIMEType,
-		v.UntrustedPayload,
-		v.UntrustedAnnotations), nil
-}
-
-func (s Sigstore) FormatID() FormatID {
-	return SigstoreFormat
-}
-
-// blobChunk returns a representation of signature as a []byte, suitable for long-term storage.
-// Almost everyone should use signature.Blob() instead.
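// For a concrete feel of the round trip (a sketch; the payload and annotation
// values are hypothetical):
//
//	sig := SigstoreFromComponents(SigstoreSignatureMIMEType,
//		[]byte(`{"critical":{}}`),
//		map[string]string{SigstoreSignatureAnnotationKey: "MEQCIA…"})
//	blob, err := Blob(sig)       // framed with a leading 0x00 and "sigstore-json"
//	sig2, err2 := FromBlob(blob) // recovers an equivalent Sigstore value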
-func (s Sigstore) blobChunk() ([]byte, error) { - return json.Marshal(sigstoreJSONRepresentation{ - UntrustedMIMEType: s.UntrustedMIMEType(), - UntrustedPayload: s.UntrustedPayload(), - UntrustedAnnotations: s.UntrustedAnnotations(), - }) -} - -func (s Sigstore) UntrustedMIMEType() string { - return s.untrustedMIMEType -} -func (s Sigstore) UntrustedPayload() []byte { - return bytes.Clone(s.untrustedPayload) -} - -func (s Sigstore) UntrustedAnnotations() map[string]string { - return maps.Clone(s.untrustedAnnotations) -} diff --git a/vendor/github.com/containers/image/v5/internal/signature/simple.go b/vendor/github.com/containers/image/v5/internal/signature/simple.go deleted file mode 100644 index 76f270b48e..0000000000 --- a/vendor/github.com/containers/image/v5/internal/signature/simple.go +++ /dev/null @@ -1,29 +0,0 @@ -package signature - -import "bytes" - -// SimpleSigning is a “simple signing” signature. -type SimpleSigning struct { - untrustedSignature []byte -} - -// SimpleSigningFromBlob converts a “simple signing” signature into a SimpleSigning object. -func SimpleSigningFromBlob(blobChunk []byte) SimpleSigning { - return SimpleSigning{ - untrustedSignature: bytes.Clone(blobChunk), - } -} - -func (s SimpleSigning) FormatID() FormatID { - return SimpleSigningFormat -} - -// blobChunk returns a representation of signature as a []byte, suitable for long-term storage. -// Almost everyone should use signature.Blob() instead. -func (s SimpleSigning) blobChunk() ([]byte, error) { - return bytes.Clone(s.untrustedSignature), nil -} - -func (s SimpleSigning) UntrustedSignature() []byte { - return bytes.Clone(s.untrustedSignature) -} diff --git a/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go b/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go deleted file mode 100644 index d5a5436a4d..0000000000 --- a/vendor/github.com/containers/image/v5/internal/streamdigest/stream_digest.go +++ /dev/null @@ -1,40 +0,0 @@ -package streamdigest - -import ( - "fmt" - "io" - "os" - - "github.com/containers/image/v5/internal/putblobdigest" - "github.com/containers/image/v5/internal/tmpdir" - "github.com/containers/image/v5/types" -) - -// ComputeBlobInfo streams a blob to a temporary file and populates Digest and Size in inputInfo. -// The temporary file is returned as an io.Reader along with a cleanup function. -// It is the caller's responsibility to call the cleanup function, which closes and removes the temporary file. -// If an error occurs, inputInfo is not modified. 
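// A typical call pattern, sketched (sys, src and info come from the caller):
//
//	stream, cleanup, err := ComputeBlobInfo(sys, src, &info)
//	if err != nil {
//		return err
//	}
//	defer cleanup()
//	// info.Digest and info.Size are now populated; stream re-reads the
//	// buffered data from the temporary file.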
-func ComputeBlobInfo(sys *types.SystemContext, stream io.Reader, inputInfo *types.BlobInfo) (io.Reader, func(), error) { - diskBlob, err := tmpdir.CreateBigFileTemp(sys, "stream-blob") - if err != nil { - return nil, nil, fmt.Errorf("creating temporary on-disk layer: %w", err) - } - cleanup := func() { - diskBlob.Close() - os.Remove(diskBlob.Name()) - } - digester, stream := putblobdigest.DigestIfCanonicalUnknown(stream, *inputInfo) - written, err := io.Copy(diskBlob, stream) - if err != nil { - cleanup() - return nil, nil, fmt.Errorf("writing to temporary on-disk layer: %w", err) - } - _, err = diskBlob.Seek(0, io.SeekStart) - if err != nil { - cleanup() - return nil, nil, fmt.Errorf("rewinding temporary on-disk layer: %w", err) - } - inputInfo.Digest = digester.Digest() - inputInfo.Size = written - return diskBlob, cleanup, nil -} diff --git a/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go b/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go deleted file mode 100644 index bab73ee334..0000000000 --- a/vendor/github.com/containers/image/v5/internal/tmpdir/tmpdir.go +++ /dev/null @@ -1,44 +0,0 @@ -package tmpdir - -import ( - "os" - "runtime" - - "github.com/containers/image/v5/types" -) - -// unixTempDirForBigFiles is the directory path to store big files on non Windows systems. -// You can override this at build time with -// -ldflags '-X github.com/containers/image/v5/internal/tmpdir.unixTempDirForBigFiles=$your_path' -var unixTempDirForBigFiles = builtinUnixTempDirForBigFiles - -// builtinUnixTempDirForBigFiles is the directory path to store big files. -// Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs. -// DO NOT change this, instead see unixTempDirForBigFiles above. -const builtinUnixTempDirForBigFiles = "/var/tmp" - -const prefix = "container_images_" - -// TemporaryDirectoryForBigFiles returns a directory for temporary (big) files. -// On non Windows systems it avoids the use of os.TempDir(), because the default temporary directory usually falls under /tmp -// which on systemd based systems could be the unsuitable tmpfs filesystem. -func temporaryDirectoryForBigFiles(sys *types.SystemContext) string { - if sys != nil && sys.BigFilesTemporaryDir != "" { - return sys.BigFilesTemporaryDir - } - var temporaryDirectoryForBigFiles string - if runtime.GOOS == "windows" { - temporaryDirectoryForBigFiles = os.TempDir() - } else { - temporaryDirectoryForBigFiles = unixTempDirForBigFiles - } - return temporaryDirectoryForBigFiles -} - -func CreateBigFileTemp(sys *types.SystemContext, name string) (*os.File, error) { - return os.CreateTemp(temporaryDirectoryForBigFiles(sys), prefix+name) -} - -func MkDirBigFileTemp(sys *types.SystemContext, name string) (string, error) { - return os.MkdirTemp(temporaryDirectoryForBigFiles(sys), prefix+name) -} diff --git a/vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go b/vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go deleted file mode 100644 index b95370af76..0000000000 --- a/vendor/github.com/containers/image/v5/internal/uploadreader/upload_reader.go +++ /dev/null @@ -1,61 +0,0 @@ -package uploadreader - -import ( - "io" - "sync" -) - -// UploadReader is a pass-through reader for use in sending non-trivial data using the net/http -// package (http.NewRequest, http.Post and the like). 
-// -// The net/http package uses a separate goroutine to upload data to a HTTP connection, -// and it is possible for the server to return a response (typically an error) before consuming -// the full body of the request. In that case http.Client.Do can return with an error while -// the body is still being read — regardless of the cancellation, if any, of http.Request.Context(). -// -// As a result, any data used/updated by the io.Reader() provided as the request body may be -// used/updated even after http.Client.Do returns, causing races. -// -// To fix this, UploadReader provides a synchronized Terminate() method, which can block for -// a not-completely-negligible time (for a duration of the underlying Read()), but guarantees that -// after Terminate() returns, the underlying reader is never used any more (unlike calling -// the cancellation callback of context.WithCancel, which returns before any recipients may have -// reacted to the cancellation). -type UploadReader struct { - mutex sync.Mutex - // The following members can only be used with mutex held - reader io.Reader - terminationError error // nil if not terminated yet -} - -// NewUploadReader returns an UploadReader for an "underlying" reader. -func NewUploadReader(underlying io.Reader) *UploadReader { - return &UploadReader{ - reader: underlying, - terminationError: nil, - } -} - -// Read returns the error set by Terminate, if any, or calls the underlying reader. -// It is safe to call this from a different goroutine than Terminate. -func (ur *UploadReader) Read(p []byte) (int, error) { - ur.mutex.Lock() - defer ur.mutex.Unlock() - - if ur.terminationError != nil { - return 0, ur.terminationError - } - return ur.reader.Read(p) -} - -// Terminate waits for in-progress Read calls, if any, to finish, and ensures that after -// this function returns, any Read calls will fail with the provided error, and the underlying -// reader will never be used any more. -// -// It is safe to call this from a different goroutine than Read. -func (ur *UploadReader) Terminate(err error) { - ur.mutex.Lock() // May block for some time if ur.reader.Read() is in progress - defer ur.mutex.Unlock() - - ur.terminationError = err -} diff --git a/vendor/github.com/containers/image/v5/internal/useragent/useragent.go b/vendor/github.com/containers/image/v5/internal/useragent/useragent.go deleted file mode 100644 index 7ac49693ed..0000000000 --- a/vendor/github.com/containers/image/v5/internal/useragent/useragent.go +++ /dev/null @@ -1,6 +0,0 @@ -package useragent - -import "github.com/containers/image/v5/version" - -// DefaultUserAgent is a value that should be used by User-Agent headers, unless the user specifically instructs us otherwise. -var DefaultUserAgent = "containers/" + version.Version + " (github.com/containers/image)" diff --git a/vendor/github.com/containers/image/v5/manifest/common.go b/vendor/github.com/containers/image/v5/manifest/common.go deleted file mode 100644 index 8d9d5795f2..0000000000 --- a/vendor/github.com/containers/image/v5/manifest/common.go +++ /dev/null @@ -1,152 +0,0 @@ -package manifest - -import ( - "fmt" - - compressiontypes "github.com/containers/image/v5/pkg/compression/types" - "github.com/containers/image/v5/types" - "github.com/sirupsen/logrus" -) - -// layerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos() -// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure. 
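// Sketch of the conversion (only the digest strings survive; sizes and media
// types are dropped):
//
//	layerInfosToStrings([]LayerInfo{
//		{BlobInfo: types.BlobInfo{Digest: "sha256:1111…", Size: -1}},
//	}) // -> []string{"sha256:1111…"}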
-func layerInfosToStrings(infos []LayerInfo) []string { - layers := make([]string, len(infos)) - for i, info := range infos { - layers[i] = info.Digest.String() - } - return layers -} - -// compressionMIMETypeSet describes a set of MIME type “variants” that represent differently-compressed -// versions of “the same kind of content”. -// The map key is the return value of compressiontypes.Algorithm.Name(), or mtsUncompressed; -// the map value is a MIME type, or mtsUnsupportedMIMEType to mean "recognized but unsupported". -type compressionMIMETypeSet map[string]string - -const mtsUncompressed = "" // A key in compressionMIMETypeSet for the uncompressed variant -const mtsUnsupportedMIMEType = "" // A value in compressionMIMETypeSet that means “recognized but unsupported” - -// findCompressionMIMETypeSet returns a pointer to a compressionMIMETypeSet in variantTable that contains a value of mimeType, or nil if not found -func findCompressionMIMETypeSet(variantTable []compressionMIMETypeSet, mimeType string) compressionMIMETypeSet { - for _, variants := range variantTable { - for _, mt := range variants { - if mt == mimeType { - return variants - } - } - } - return nil -} - -// compressionVariantMIMEType returns a variant of mimeType for the specified algorithm (which may be nil -// to mean "no compression"), based on variantTable. -// The returned error will be a ManifestLayerCompressionIncompatibilityError if mimeType has variants -// that differ only in what type of compression is applied, but it can't be combined with this -// algorithm to produce an updated MIME type that complies with the standard that defines mimeType. -// If the compression algorithm is unrecognized, or mimeType is not known to have variants that -// differ from it only in what type of compression has been applied, the returned error will not be -// a ManifestLayerCompressionIncompatibilityError. -func compressionVariantMIMEType(variantTable []compressionMIMETypeSet, mimeType string, algorithm *compressiontypes.Algorithm) (string, error) { - if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries - return "", fmt.Errorf("cannot update unknown MIME type") - } - variants := findCompressionMIMETypeSet(variantTable, mimeType) - if variants != nil { - name := mtsUncompressed - if algorithm != nil { - name = algorithm.BaseVariantName() - } - if res, ok := variants[name]; ok { - if res != mtsUnsupportedMIMEType { - return res, nil - } - if name != mtsUncompressed { - return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("%s compression is not supported for type %q", name, mimeType)} - } - return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mimeType)} - } - if name != mtsUncompressed { - return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("unknown compressed with algorithm %s variant for type %q", name, mimeType)} - } - // We can't very well say “the idea of no compression is unknown” - return "", ManifestLayerCompressionIncompatibilityError{fmt.Sprintf("uncompressed variant is not supported for type %q", mimeType)} - } - if algorithm != nil { - return "", fmt.Errorf("unsupported MIME type for compression: %q", mimeType) - } - return "", fmt.Errorf("unsupported MIME type for decompression: %q", mimeType) -} - -// updatedMIMEType returns the result of applying edits in updated (MediaType, CompressionOperation) to -// mimeType, based on variantTable. 
It may use updated.Digest for error messages. -// The returned error will be a ManifestLayerCompressionIncompatibilityError if mimeType has variants -// that differ only in what type of compression is applied, but applying updated.CompressionOperation -// and updated.CompressionAlgorithm to it won't produce an updated MIME type that complies with the -// standard that defines mimeType. -func updatedMIMEType(variantTable []compressionMIMETypeSet, mimeType string, updated types.BlobInfo) (string, error) { - // Note that manifests in containers-storage might be reporting the - // wrong media type since the original manifests are stored while layers - // are decompressed in storage. Hence, we need to consider the case - // that an already {de}compressed layer should be {de}compressed; - // compressionVariantMIMEType does that by not caring whether the original is - // {de}compressed. - switch updated.CompressionOperation { - case types.PreserveOriginal: - // Force a change to the media type if we're being told to use a particular compressor, - // since it might be different from the one associated with the media type. Otherwise, - // try to keep the original media type. - if updated.CompressionAlgorithm != nil { - return compressionVariantMIMEType(variantTable, mimeType, updated.CompressionAlgorithm) - } - // Keep the original media type. - return mimeType, nil - - case types.Decompress: - return compressionVariantMIMEType(variantTable, mimeType, nil) - - case types.Compress: - if updated.CompressionAlgorithm == nil { - logrus.Debugf("Error preparing updated manifest: blob %q was compressed but does not specify by which algorithm: falling back to use the original blob", updated.Digest) - return mimeType, nil - } - return compressionVariantMIMEType(variantTable, mimeType, updated.CompressionAlgorithm) - - default: - return "", fmt.Errorf("unknown compression operation (%d)", updated.CompressionOperation) - } -} - -// ManifestLayerCompressionIncompatibilityError indicates that a specified compression algorithm -// could not be applied to a layer MIME type. A caller that receives this should either retry -// the call with a different compression algorithm, or attempt to use a different manifest type. -type ManifestLayerCompressionIncompatibilityError struct { - text string -} - -func (m ManifestLayerCompressionIncompatibilityError) Error() string { - return m.text -} - -// compressionVariantsRecognizeMIMEType returns true if variantTable contains data about compressing/decompressing layers with mimeType -// Note that the caller still needs to worry about a specific algorithm not being supported. -func compressionVariantsRecognizeMIMEType(variantTable []compressionMIMETypeSet, mimeType string) bool { - if mimeType == mtsUnsupportedMIMEType { // Prevent matching against the {algo:mtsUnsupportedMIMEType} entries - return false - } - variants := findCompressionMIMETypeSet(variantTable, mimeType) - return variants != nil // Alternatively, this could be len(variants) > 1, but really the caller should ask about a specific algorithm. -} - -// imgInspectLayersFromLayerInfos converts a list of layer infos, presumably obtained from a Manifest.LayerInfos() -// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure. 
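// A worked example of the compression machinery above, using the
// schema2CompressionMIMETypeSets table from docker_schema2.go (the zstd value
// assumes pkg/compression is imported):
//
//	mt, err := compressionVariantMIMEType(schema2CompressionMIMETypeSets,
//		DockerV2Schema2LayerMediaType, nil)
//	// mt == DockerV2SchemaLayerMediaTypeUncompressed, err == nil
//	_, err = compressionVariantMIMEType(schema2CompressionMIMETypeSets,
//		DockerV2Schema2LayerMediaType, &compression.Zstd)
//	// err is a ManifestLayerCompressionIncompatibilityError (zstd has no
//	// supported variant in that table)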
-func imgInspectLayersFromLayerInfos(infos []LayerInfo) []types.ImageInspectLayer { - layers := make([]types.ImageInspectLayer, len(infos)) - for i, info := range infos { - layers[i].MIMEType = info.MediaType - layers[i].Digest = info.Digest - layers[i].Size = info.Size - layers[i].Annotations = info.Annotations - } - return layers -} diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go b/vendor/github.com/containers/image/v5/manifest/docker_schema1.go deleted file mode 100644 index f4b1fc0337..0000000000 --- a/vendor/github.com/containers/image/v5/manifest/docker_schema1.go +++ /dev/null @@ -1,346 +0,0 @@ -package manifest - -import ( - "encoding/json" - "errors" - "fmt" - "slices" - "strings" - "time" - - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/internal/manifest" - "github.com/containers/image/v5/internal/set" - compressiontypes "github.com/containers/image/v5/pkg/compression/types" - "github.com/containers/image/v5/types" - "github.com/containers/storage/pkg/regexp" - "github.com/docker/docker/api/types/versions" - "github.com/opencontainers/go-digest" -) - -// Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1. -type Schema1FSLayers struct { - BlobSum digest.Digest `json:"blobSum"` -} - -// Schema1History is an entry of the "history" array in docker/distribution schema 1. -type Schema1History struct { - V1Compatibility string `json:"v1Compatibility"` -} - -// Schema1 is a manifest in docker/distribution schema 1. -type Schema1 struct { - Name string `json:"name"` - Tag string `json:"tag"` - Architecture string `json:"architecture"` - FSLayers []Schema1FSLayers `json:"fsLayers"` - History []Schema1History `json:"history"` // Keep this in sync with ExtractedV1Compatibility! - ExtractedV1Compatibility []Schema1V1Compatibility `json:"-"` // Keep this in sync with History! Does not contain the full config (Schema2V1Image) - SchemaVersion int `json:"schemaVersion"` -} - -type schema1V1CompatibilityContainerConfig struct { - Cmd []string -} - -// Schema1V1Compatibility is a v1Compatibility in docker/distribution schema 1. -type Schema1V1Compatibility struct { - ID string `json:"id"` - Parent string `json:"parent,omitempty"` - Comment string `json:"comment,omitempty"` - Created time.Time `json:"created"` - ContainerConfig schema1V1CompatibilityContainerConfig `json:"container_config,omitempty"` - Author string `json:"author,omitempty"` - ThrowAway bool `json:"throwaway,omitempty"` -} - -// Schema1FromManifest creates a Schema1 manifest instance from a manifest blob. -// (NOTE: The instance is not necessary a literal representation of the original blob, -// layers with duplicate IDs are eliminated.) -func Schema1FromManifest(manifestBlob []byte) (*Schema1, error) { - s1 := Schema1{} - if err := json.Unmarshal(manifestBlob, &s1); err != nil { - return nil, err - } - if s1.SchemaVersion != 1 { - return nil, fmt.Errorf("unsupported schema version %d", s1.SchemaVersion) - } - if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, DockerV2Schema1SignedMediaType, - manifest.AllowedFieldFSLayers|manifest.AllowedFieldHistory); err != nil { - return nil, err - } - if err := s1.initialize(); err != nil { - return nil, err - } - if err := s1.fixManifestLayers(); err != nil { - return nil, err - } - return &s1, nil -} - -// Schema1FromComponents creates an Schema1 manifest instance from the supplied data. 
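// A hedged usage sketch (fsLayers and history come from the caller, must be
// equal-length, and are ordered most-recent-first with the base layer last):
//
//	named, _ := reference.ParseNormalizedNamed("docker.io/library/busybox:latest")
//	m, err := Schema1FromComponents(named, fsLayers, history, "amd64")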
-func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, history []Schema1History, architecture string) (*Schema1, error) {
-	var name, tag string
-	if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them.
-		name = reference.Path(ref)
-		if tagged, ok := ref.(reference.NamedTagged); ok {
-			tag = tagged.Tag()
-		}
-	}
-	s1 := Schema1{
-		Name:          name,
-		Tag:           tag,
-		Architecture:  architecture,
-		FSLayers:      fsLayers,
-		History:       history,
-		SchemaVersion: 1,
-	}
-	if err := s1.initialize(); err != nil {
-		return nil, err
-	}
-	return &s1, nil
-}
-
-// Schema1Clone creates a copy of the supplied Schema1 manifest.
-func Schema1Clone(src *Schema1) *Schema1 {
-	copy := *src
-	return &copy
-}
-
-// initialize initializes ExtractedV1Compatibility and verifies invariants, so that the rest of this code can assume a minimally healthy manifest.
-func (m *Schema1) initialize() error {
-	if len(m.FSLayers) != len(m.History) {
-		return errors.New("length of history not equal to number of layers")
-	}
-	if len(m.FSLayers) == 0 {
-		return errors.New("no FSLayers in manifest")
-	}
-	m.ExtractedV1Compatibility = make([]Schema1V1Compatibility, len(m.History))
-	for i, h := range m.History {
-		if err := json.Unmarshal([]byte(h.V1Compatibility), &m.ExtractedV1Compatibility[i]); err != nil {
-			return fmt.Errorf("parsing v2s1 history entry %d: %w", i, err)
-		}
-	}
-	return nil
-}
-
-// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
-func (m *Schema1) ConfigInfo() types.BlobInfo {
-	return types.BlobInfo{}
-}
-
-// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (m *Schema1) LayerInfos() []LayerInfo {
-	layers := make([]LayerInfo, 0, len(m.FSLayers))
-	for i, layer := range slices.Backward(m.FSLayers) { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway)
-		layers = append(layers, LayerInfo{
-			BlobInfo:   types.BlobInfo{Digest: layer.BlobSum, Size: -1},
-			EmptyLayer: m.ExtractedV1Compatibility[i].ThrowAway,
-		})
-	}
-	return layers
-}
-
-const fakeSchema1MIMEType = DockerV2Schema2LayerMediaType // Used only in schema1CompressionMIMETypeSets
-var schema1CompressionMIMETypeSets = []compressionMIMETypeSet{
-	{
-		mtsUncompressed:                    fakeSchema1MIMEType,
-		compressiontypes.GzipAlgorithmName: fakeSchema1MIMEType,
-		compressiontypes.ZstdAlgorithmName: mtsUnsupportedMIMEType,
-	},
-}
-
-// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
-func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
-	// Our LayerInfos includes empty layers (where m.ExtractedV1Compatibility[].ThrowAway), so expect them to be included here as well.
- if len(m.FSLayers) != len(layerInfos) { - return fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.FSLayers), len(layerInfos)) - } - m.FSLayers = make([]Schema1FSLayers, len(layerInfos)) - for i, info := range layerInfos { - // There are no MIME types in schema1, but we do a “conversion” here to reject unsupported compression algorithms, - // in a way that is consistent with the other schema implementations. - if _, err := updatedMIMEType(schema1CompressionMIMETypeSets, fakeSchema1MIMEType, info); err != nil { - return fmt.Errorf("preparing updated manifest, layer %q: %w", info.Digest, err) - } - // (docker push) sets up m.ExtractedV1Compatibility[].{Id,Parent} based on values of info.Digest, - // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness. - // So, we don't bother recomputing the IDs in m.History.V1Compatibility. - m.FSLayers[(len(layerInfos)-1)-i].BlobSum = info.Digest - if info.CryptoOperation != types.PreserveOriginalCrypto { - return fmt.Errorf("encryption change (for layer %q) is not supported in schema1 manifests", info.Digest) - } - } - return nil -} - -// Serialize returns the manifest in a blob format. -// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! -func (m *Schema1) Serialize() ([]byte, error) { - // docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType. - unsigned, err := json.Marshal(*m) - if err != nil { - return nil, err - } - return AddDummyV2S1Signature(unsigned) -} - -// fixManifestLayers, after validating the supplied manifest -// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in m.History), -// modifies manifest to only have one entry for each layer ID in m.History (deleting the older duplicates, -// both from m.History and m.FSLayers). -// Note that even after this succeeds, m.FSLayers may contain duplicate entries -// (for Dockerfile operations which change the configuration but not the filesystem). -func (m *Schema1) fixManifestLayers() error { - // m.initialize() has verified that len(m.FSLayers) == len(m.History) - for _, compat := range m.ExtractedV1Compatibility { - if err := validateV1ID(compat.ID); err != nil { - return err - } - } - if m.ExtractedV1Compatibility[len(m.ExtractedV1Compatibility)-1].Parent != "" { - return errors.New("Invalid parent ID in the base layer of the image") - } - // check general duplicates to error instead of a deadlock - idmap := set.New[string]() - var lastID string - for _, img := range m.ExtractedV1Compatibility { - // skip IDs that appear after each other, we handle those later - if img.ID != lastID && idmap.Contains(img.ID) { - return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID) - } - lastID = img.ID - idmap.Add(lastID) - } - // backwards loop so that we keep the remaining indexes after removing items - for i := len(m.ExtractedV1Compatibility) - 2; i >= 0; i-- { - if m.ExtractedV1Compatibility[i].ID == m.ExtractedV1Compatibility[i+1].ID { // repeated ID. remove and continue - m.FSLayers = slices.Delete(m.FSLayers, i, i+1) - m.History = slices.Delete(m.History, i, i+1) - m.ExtractedV1Compatibility = slices.Delete(m.ExtractedV1Compatibility, i, i+1) - } else if m.ExtractedV1Compatibility[i].Parent != m.ExtractedV1Compatibility[i+1].ID { - return fmt.Errorf("Invalid parent ID. 
Expected %v, got %q", m.ExtractedV1Compatibility[i+1].ID, m.ExtractedV1Compatibility[i].Parent) - } - } - return nil -} - -var validHex = regexp.Delayed(`^([a-f0-9]{64})$`) - -func validateV1ID(id string) error { - if ok := validHex.MatchString(id); !ok { - return fmt.Errorf("image ID %q is invalid", id) - } - return nil -} - -// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. -func (m *Schema1) Inspect(_ func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { - s1 := &Schema2V1Image{} - if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), s1); err != nil { - return nil, err - } - layerInfos := m.LayerInfos() - i := &types.ImageInspectInfo{ - Tag: m.Tag, - Created: &s1.Created, - DockerVersion: s1.DockerVersion, - Architecture: s1.Architecture, - Variant: s1.Variant, - Os: s1.OS, - Layers: layerInfosToStrings(layerInfos), - LayersData: imgInspectLayersFromLayerInfos(layerInfos), - Author: s1.Author, - } - if s1.Config != nil { - i.Labels = s1.Config.Labels - i.Env = s1.Config.Env - } - return i, nil -} - -// ToSchema2Config builds a schema2-style configuration blob using the supplied diffIDs. -func (m *Schema1) ToSchema2Config(diffIDs []digest.Digest) ([]byte, error) { - // Convert the schema 1 compat info into a schema 2 config, constructing some of the fields - // that aren't directly comparable using info from the manifest. - if len(m.History) == 0 { - return nil, errors.New("image has no layers") - } - s1 := Schema2V1Image{} - config := []byte(m.History[0].V1Compatibility) - err := json.Unmarshal(config, &s1) - if err != nil { - return nil, fmt.Errorf("decoding configuration: %w", err) - } - // Images created with versions prior to 1.8.3 require us to re-encode the encoded object, - // adding some fields that aren't "omitempty". - if s1.DockerVersion != "" && versions.LessThan(s1.DockerVersion, "1.8.3") { - config, err = json.Marshal(&s1) - if err != nil { - return nil, fmt.Errorf("re-encoding compat image config %#v: %w", s1, err) - } - } - // Build the history. - convertedHistory := []Schema2History{} - for _, compat := range slices.Backward(m.ExtractedV1Compatibility) { - hitem := Schema2History{ - Created: compat.Created, - CreatedBy: strings.Join(compat.ContainerConfig.Cmd, " "), - Author: compat.Author, - Comment: compat.Comment, - EmptyLayer: compat.ThrowAway, - } - convertedHistory = append(convertedHistory, hitem) - } - // Build the rootfs information. We need the decompressed sums that we've been - // calculating to fill in the DiffIDs. It's expected (but not enforced by us) - // that the number of diffIDs corresponds to the number of non-EmptyLayer - // entries in the history. - rootFS := &Schema2RootFS{ - Type: "layers", - DiffIDs: diffIDs, - } - // And now for some raw manipulation. - raw := make(map[string]*json.RawMessage) - err = json.Unmarshal(config, &raw) - if err != nil { - return nil, fmt.Errorf("re-decoding compat image config %#v: %w", s1, err) - } - // Drop some fields. - delete(raw, "id") - delete(raw, "parent") - delete(raw, "parent_id") - delete(raw, "layer_id") - delete(raw, "throwaway") - delete(raw, "Size") - // Add the history and rootfs information. 
- rootfs, err := json.Marshal(rootFS) - if err != nil { - return nil, fmt.Errorf("error encoding rootfs information %#v: %w", rootFS, err) - } - rawRootfs := json.RawMessage(rootfs) - raw["rootfs"] = &rawRootfs - history, err := json.Marshal(convertedHistory) - if err != nil { - return nil, fmt.Errorf("error encoding history information %#v: %w", convertedHistory, err) - } - rawHistory := json.RawMessage(history) - raw["history"] = &rawHistory - // Encode the result. - config, err = json.Marshal(raw) - if err != nil { - return nil, fmt.Errorf("error re-encoding compat image config %#v: %w", s1, err) - } - return config, nil -} - -// ImageID computes an ID which can uniquely identify this image by its contents. -func (m *Schema1) ImageID(diffIDs []digest.Digest) (string, error) { - image, err := m.ToSchema2Config(diffIDs) - if err != nil { - return "", err - } - return digest.FromBytes(image).Encoded(), nil -} diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2.go deleted file mode 100644 index 7e53f4f54e..0000000000 --- a/vendor/github.com/containers/image/v5/manifest/docker_schema2.go +++ /dev/null @@ -1,307 +0,0 @@ -package manifest - -import ( - "encoding/json" - "fmt" - "time" - - "github.com/containers/image/v5/internal/manifest" - compressiontypes "github.com/containers/image/v5/pkg/compression/types" - "github.com/containers/image/v5/pkg/strslice" - "github.com/containers/image/v5/types" - "github.com/opencontainers/go-digest" -) - -// Schema2Descriptor is a “descriptor” in docker/distribution schema 2. -type Schema2Descriptor = manifest.Schema2Descriptor - -// BlobInfoFromSchema2Descriptor returns a types.BlobInfo based on the input schema 2 descriptor. -func BlobInfoFromSchema2Descriptor(desc Schema2Descriptor) types.BlobInfo { - return types.BlobInfo{ - Digest: desc.Digest, - Size: desc.Size, - URLs: desc.URLs, - MediaType: desc.MediaType, - } -} - -// Schema2 is a manifest in docker/distribution schema 2. -type Schema2 struct { - SchemaVersion int `json:"schemaVersion"` - MediaType string `json:"mediaType"` - ConfigDescriptor Schema2Descriptor `json:"config"` - LayersDescriptors []Schema2Descriptor `json:"layers"` -} - -// Schema2Port is a Port, a string containing port number and protocol in the -// format "80/tcp", from docker/go-connections/nat. -type Schema2Port string - -// Schema2PortSet is a PortSet, a collection of structs indexed by Port, from -// docker/go-connections/nat. -type Schema2PortSet map[Schema2Port]struct{} - -// Schema2HealthConfig is a HealthConfig, which holds configuration settings -// for the HEALTHCHECK feature, from docker/docker/api/types/container. -type Schema2HealthConfig struct { - // Test is the test to perform to check that the container is healthy. - // An empty slice means to inherit the default. - // The options are: - // {} : inherit healthcheck - // {"NONE"} : disable healthcheck - // {"CMD", args...} : exec arguments directly - // {"CMD-SHELL", command} : run command with system's default shell - Test []string `json:",omitempty"` - - // Zero means to inherit. Durations are expressed as integer nanoseconds. - StartPeriod time.Duration `json:",omitempty"` // StartPeriod is the time to wait after starting before running the first check. - StartInterval time.Duration `json:",omitempty"` // StartInterval is the time to wait between checks during the start period. - Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. 
- Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. - - // Retries is the number of consecutive failures needed to consider a container as unhealthy. - // Zero means inherit. - Retries int `json:",omitempty"` -} - -// Schema2Config is a Config in docker/docker/api/types/container. -type Schema2Config struct { - Hostname string // Hostname - Domainname string // Domainname - User string // User that will run the command(s) inside the container, also support user:group - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStdout bool // Attach the standard output - AttachStderr bool // Attach the standard error - ExposedPorts Schema2PortSet `json:",omitempty"` // List of exposed ports - Tty bool // Attach standard streams to a tty, including stdin if it is not closed. - OpenStdin bool // Open stdin - StdinOnce bool // If true, close stdin after the 1 attached client disconnects. - Env []string // List of environment variable to set in the container - Cmd strslice.StrSlice // Command to run when starting the container - Healthcheck *Schema2HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy - ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) - Image string // Name of the image as it was passed by the operator (e.g. could be symbolic) - Volumes map[string]struct{} // List of volumes (mounts) used for the container - WorkingDir string // Current directory (PWD) in the command will be launched - Entrypoint strslice.StrSlice // Entrypoint to run when starting the container - NetworkDisabled bool `json:",omitempty"` // Is network disabled - MacAddress string `json:",omitempty"` // Mac Address of the container - OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile - Labels map[string]string // List of labels set to this container - StopSignal string `json:",omitempty"` // Signal to stop a container - StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container - Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT -} - -// Schema2V1Image is a V1Image in docker/docker/image. 
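// Looking back at Schema2HealthConfig above, an illustrative value (CMD-SHELL
// form, per the Test field documentation; durations are time.Duration, i.e.
// integer nanoseconds when serialized):
//
//	hc := Schema2HealthConfig{
//		Test:     []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
//		Interval: 30 * time.Second,
//		Timeout:  5 * time.Second,
//		Retries:  3,
//	}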
-type Schema2V1Image struct { - // ID is a unique 64 character identifier of the image - ID string `json:"id,omitempty"` - // Parent is the ID of the parent image - Parent string `json:"parent,omitempty"` - // Comment is the commit message that was set when committing the image - Comment string `json:"comment,omitempty"` - // Created is the timestamp at which the image was created - Created time.Time `json:"created"` - // Container is the id of the container used to commit - Container string `json:"container,omitempty"` - // ContainerConfig is the configuration of the container that is committed into the image - ContainerConfig Schema2Config `json:"container_config,omitempty"` - // DockerVersion specifies the version of Docker that was used to build the image - DockerVersion string `json:"docker_version,omitempty"` - // Author is the name of the author that was specified when committing the image - Author string `json:"author,omitempty"` - // Config is the configuration of the container received from the client - Config *Schema2Config `json:"config,omitempty"` - // Architecture is the hardware that the image is built and runs on - Architecture string `json:"architecture,omitempty"` - // Variant is a variant of the CPU that the image is built and runs on - Variant string `json:"variant,omitempty"` - // OS is the operating system used to built and run the image - OS string `json:"os,omitempty"` - // Size is the total size of the image including all layers it is composed of - Size int64 `json:",omitempty"` -} - -// Schema2RootFS is a description of how to build up an image's root filesystem, from docker/docker/image. -type Schema2RootFS struct { - Type string `json:"type"` - DiffIDs []digest.Digest `json:"diff_ids,omitempty"` -} - -// Schema2History stores build commands that were used to create an image, from docker/docker/image. -type Schema2History struct { - // Created is the timestamp at which the image was created - Created time.Time `json:"created"` - // Author is the name of the author that was specified when committing the image - Author string `json:"author,omitempty"` - // CreatedBy keeps the Dockerfile command used while building the image - CreatedBy string `json:"created_by,omitempty"` - // Comment is the commit message that was set when committing the image - Comment string `json:"comment,omitempty"` - // EmptyLayer is set to true if this history item did not generate a - // layer. Otherwise, the history item is associated with the next - // layer in the RootFS section. - EmptyLayer bool `json:"empty_layer,omitempty"` -} - -// Schema2Image is an Image in docker/docker/image. -type Schema2Image struct { - Schema2V1Image - Parent digest.Digest `json:"parent,omitempty"` - RootFS *Schema2RootFS `json:"rootfs,omitempty"` - History []Schema2History `json:"history,omitempty"` - OSVersion string `json:"os.version,omitempty"` - OSFeatures []string `json:"os.features,omitempty"` -} - -// Schema2FromManifest creates a Schema2 manifest instance from a manifest blob. -func Schema2FromManifest(manifestBlob []byte) (*Schema2, error) { - s2 := Schema2{} - if err := json.Unmarshal(manifestBlob, &s2); err != nil { - return nil, err - } - if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, DockerV2Schema2MediaType, - manifest.AllowedFieldConfig|manifest.AllowedFieldLayers); err != nil { - return nil, err - } - // Check manifest's and layers' media types. 
-	if err := SupportedSchema2MediaType(s2.MediaType); err != nil {
-		return nil, err
-	}
-	for _, layer := range s2.LayersDescriptors {
-		if err := SupportedSchema2MediaType(layer.MediaType); err != nil {
-			return nil, err
-		}
-	}
-	return &s2, nil
-}
-
-// Schema2FromComponents creates an Schema2 manifest instance from the supplied data.
-func Schema2FromComponents(config Schema2Descriptor, layers []Schema2Descriptor) *Schema2 {
-	return &Schema2{
-		SchemaVersion:     2,
-		MediaType:         DockerV2Schema2MediaType,
-		ConfigDescriptor:  config,
-		LayersDescriptors: layers,
-	}
-}
-
-// Schema2Clone creates a copy of the supplied Schema2 manifest.
-func Schema2Clone(src *Schema2) *Schema2 {
-	copy := *src
-	return &copy
-}
-
-// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
-func (m *Schema2) ConfigInfo() types.BlobInfo {
-	return BlobInfoFromSchema2Descriptor(m.ConfigDescriptor)
-}
-
-// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
-// The Digest field is guaranteed to be provided; Size may be -1.
-// WARNING: The list may contain duplicates, and they are semantically relevant.
-func (m *Schema2) LayerInfos() []LayerInfo {
-	blobs := make([]LayerInfo, 0, len(m.LayersDescriptors))
-	for _, layer := range m.LayersDescriptors {
-		blobs = append(blobs, LayerInfo{
-			BlobInfo:   BlobInfoFromSchema2Descriptor(layer),
-			EmptyLayer: false,
-		})
-	}
-	return blobs
-}
-
-var schema2CompressionMIMETypeSets = []compressionMIMETypeSet{
-	{
-		mtsUncompressed:                    DockerV2Schema2ForeignLayerMediaType,
-		compressiontypes.GzipAlgorithmName: DockerV2Schema2ForeignLayerMediaTypeGzip,
-		compressiontypes.ZstdAlgorithmName: mtsUnsupportedMIMEType,
-	},
-	{
-		mtsUncompressed:                    DockerV2SchemaLayerMediaTypeUncompressed,
-		compressiontypes.GzipAlgorithmName: DockerV2Schema2LayerMediaType,
-		compressiontypes.ZstdAlgorithmName: mtsUnsupportedMIMEType,
-	},
-}
-
-// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers)
-// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if any of the layerInfos includes a combination of CompressionOperation and
-// CompressionAlgorithm that would result in anything other than gzip compression.
-func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
-	if len(m.LayersDescriptors) != len(layerInfos) {
-		return fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos))
-	}
-	original := m.LayersDescriptors
-	m.LayersDescriptors = make([]Schema2Descriptor, len(layerInfos))
-	for i, info := range layerInfos {
-		mimeType := original[i].MediaType
-		// First make sure we support the media type of the original layer.
- if err := SupportedSchema2MediaType(mimeType); err != nil { - return fmt.Errorf("Error preparing updated manifest: unknown media type of original layer %q: %q", info.Digest, mimeType) - } - mimeType, err := updatedMIMEType(schema2CompressionMIMETypeSets, mimeType, info) - if err != nil { - return fmt.Errorf("preparing updated manifest, layer %q: %w", info.Digest, err) - } - m.LayersDescriptors[i].MediaType = mimeType - m.LayersDescriptors[i].Digest = info.Digest - m.LayersDescriptors[i].Size = info.Size - m.LayersDescriptors[i].URLs = info.URLs - if info.CryptoOperation != types.PreserveOriginalCrypto { - return fmt.Errorf("encryption change (for layer %q) is not supported in schema2 manifests", info.Digest) - } - } - return nil -} - -// Serialize returns the manifest in a blob format. -// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! -func (m *Schema2) Serialize() ([]byte, error) { - return json.Marshal(*m) -} - -// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. -func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { - config, err := configGetter(m.ConfigInfo()) - if err != nil { - return nil, err - } - s2 := &Schema2Image{} - if err := json.Unmarshal(config, s2); err != nil { - return nil, err - } - layerInfos := m.LayerInfos() - i := &types.ImageInspectInfo{ - Tag: "", - Created: &s2.Created, - DockerVersion: s2.DockerVersion, - Architecture: s2.Architecture, - Variant: s2.Variant, - Os: s2.OS, - Layers: layerInfosToStrings(layerInfos), - LayersData: imgInspectLayersFromLayerInfos(layerInfos), - Author: s2.Author, - } - if s2.Config != nil { - i.Labels = s2.Config.Labels - i.Env = s2.Config.Env - } - return i, nil -} - -// ImageID computes an ID which can uniquely identify this image by its contents. -func (m *Schema2) ImageID([]digest.Digest) (string, error) { - if err := m.ConfigDescriptor.Digest.Validate(); err != nil { - return "", err - } - return m.ConfigDescriptor.Digest.Encoded(), nil -} - -// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image -// (and the code can handle that). -// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted -// algorithms depends not on the current format, but possibly on the target of a conversion. -func (m *Schema2) CanChangeLayerCompression(mimeType string) bool { - return compressionVariantsRecognizeMIMEType(schema2CompressionMIMETypeSets, mimeType) -} diff --git a/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go b/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go deleted file mode 100644 index c958a3fa3a..0000000000 --- a/vendor/github.com/containers/image/v5/manifest/docker_schema2_list.go +++ /dev/null @@ -1,32 +0,0 @@ -package manifest - -import ( - "github.com/containers/image/v5/internal/manifest" -) - -// Schema2PlatformSpec describes the platform which a particular manifest is -// specialized for. -type Schema2PlatformSpec = manifest.Schema2PlatformSpec - -// Schema2ManifestDescriptor references a platform-specific manifest. -type Schema2ManifestDescriptor = manifest.Schema2ManifestDescriptor - -// Schema2List is a list of platform-specific manifests. 
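// Hedged usage sketch for the wrappers below (blob is a raw manifest list,
// e.g. as fetched from a registry):
//
//	list, err := Schema2ListFromManifest(blob)
//	if err != nil {
//		return err
//	}
//	clone := Schema2ListClone(list) // deep copy, safe to modify independently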
-type Schema2List = manifest.Schema2ListPublic - -// Schema2ListFromComponents creates a Schema2 manifest list instance from the -// supplied data. -func Schema2ListFromComponents(components []Schema2ManifestDescriptor) *Schema2List { - return manifest.Schema2ListPublicFromComponents(components) -} - -// Schema2ListClone creates a deep copy of the passed-in list. -func Schema2ListClone(list *Schema2List) *Schema2List { - return manifest.Schema2ListPublicClone(list) -} - -// Schema2ListFromManifest creates a Schema2 manifest list instance from marshalled -// JSON, presumably generated by encoding a Schema2 manifest list. -func Schema2ListFromManifest(manifestBlob []byte) (*Schema2List, error) { - return manifest.Schema2ListPublicFromManifest(manifestBlob) -} diff --git a/vendor/github.com/containers/image/v5/manifest/list.go b/vendor/github.com/containers/image/v5/manifest/list.go deleted file mode 100644 index 1d6fdc9f56..0000000000 --- a/vendor/github.com/containers/image/v5/manifest/list.go +++ /dev/null @@ -1,35 +0,0 @@ -package manifest - -import ( - "github.com/containers/image/v5/internal/manifest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" -) - -var ( - // SupportedListMIMETypes is a list of the manifest list types that we know how to - // read/manipulate/write. - SupportedListMIMETypes = []string{ - DockerV2ListMediaType, - imgspecv1.MediaTypeImageIndex, - } -) - -// List is an interface for parsing, modifying lists of image manifests. -// Callers can either use this abstract interface without understanding the details of the formats, -// or instantiate a specific implementation (e.g. manifest.OCI1Index) and access the public members -// directly. -type List = manifest.ListPublic - -// ListUpdate includes the fields which a List's UpdateInstances() method will modify. -type ListUpdate = manifest.ListUpdate - -// ListFromBlob parses a list of manifests. -func ListFromBlob(manifestBlob []byte, manifestMIMEType string) (List, error) { - return manifest.ListPublicFromBlob(manifestBlob, manifestMIMEType) -} - -// ConvertListToMIMEType converts the passed-in manifest list to a manifest -// list of the specified type. -func ConvertListToMIMEType(list List, manifestMIMEType string) (List, error) { - return list.ConvertToMIMEType(manifestMIMEType) -} diff --git a/vendor/github.com/containers/image/v5/manifest/manifest.go b/vendor/github.com/containers/image/v5/manifest/manifest.go deleted file mode 100644 index d8f37eb45d..0000000000 --- a/vendor/github.com/containers/image/v5/manifest/manifest.go +++ /dev/null @@ -1,170 +0,0 @@ -package manifest - -import ( - "fmt" - - "github.com/containers/image/v5/internal/manifest" - "github.com/containers/image/v5/types" - "github.com/containers/libtrust" - digest "github.com/opencontainers/go-digest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" -) - -// FIXME: Should we just use docker/distribution and docker/docker implementations directly? - -// FIXME(runcom, mitr): should we have a mediatype pkg?? 
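// A typical entry point into this package, sketched (blob is a raw manifest;
// when no MIME type is known out of band, GuessMIMEType below can supply one):
//
//	m, err := FromBlob(blob, GuessMIMEType(blob))
//	if err != nil {
//		return err
//	}
//	fmt.Println(m.ConfigInfo().Digest)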
-const ( - // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1 - DockerV2Schema1MediaType = manifest.DockerV2Schema1MediaType - // DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature - DockerV2Schema1SignedMediaType = manifest.DockerV2Schema1SignedMediaType - // DockerV2Schema2MediaType MIME type represents Docker manifest schema 2 - DockerV2Schema2MediaType = manifest.DockerV2Schema2MediaType - // DockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs. - DockerV2Schema2ConfigMediaType = manifest.DockerV2Schema2ConfigMediaType - // DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers. - DockerV2Schema2LayerMediaType = manifest.DockerV2Schema2LayerMediaType - // DockerV2SchemaLayerMediaTypeUncompressed is the mediaType used for uncompressed layers. - DockerV2SchemaLayerMediaTypeUncompressed = manifest.DockerV2SchemaLayerMediaTypeUncompressed - // DockerV2ListMediaType MIME type represents Docker manifest schema 2 list - DockerV2ListMediaType = manifest.DockerV2ListMediaType - // DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers. - DockerV2Schema2ForeignLayerMediaType = manifest.DockerV2Schema2ForeignLayerMediaType - // DockerV2Schema2ForeignLayerMediaType is the MIME type used for gzipped schema 2 foreign layers. - DockerV2Schema2ForeignLayerMediaTypeGzip = manifest.DockerV2Schema2ForeignLayerMediaTypeGzip -) - -// NonImageArtifactError (detected via errors.As) is used when asking for an image-specific operation -// on an object which is not a “container image” in the standard sense (e.g. an OCI artifact) -type NonImageArtifactError = manifest.NonImageArtifactError - -// SupportedSchema2MediaType checks if the specified string is a supported Docker v2s2 media type. -func SupportedSchema2MediaType(m string) error { - switch m { - case DockerV2ListMediaType, DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, DockerV2Schema2ConfigMediaType, DockerV2Schema2ForeignLayerMediaType, DockerV2Schema2ForeignLayerMediaTypeGzip, DockerV2Schema2LayerMediaType, DockerV2Schema2MediaType, DockerV2SchemaLayerMediaTypeUncompressed: - return nil - default: - return fmt.Errorf("unsupported docker v2s2 media type: %q", m) - } -} - -// DefaultRequestedManifestMIMETypes is a list of MIME types a types.ImageSource -// should request from the backend unless directed otherwise. -var DefaultRequestedManifestMIMETypes = []string{ - imgspecv1.MediaTypeImageManifest, - DockerV2Schema2MediaType, - DockerV2Schema1SignedMediaType, - DockerV2Schema1MediaType, - DockerV2ListMediaType, - imgspecv1.MediaTypeImageIndex, -} - -// Manifest is an interface for parsing, modifying image manifests in isolation. -// Callers can either use this abstract interface without understanding the details of the formats, -// or instantiate a specific implementation (e.g. manifest.OCI1) and access the public members -// directly. -// -// See types.Image for functionality not limited to manifests, including format conversions and config parsing. -// This interface is similar to, but not strictly equivalent to, the equivalent methods in types.Image. -type Manifest interface { - // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. - ConfigInfo() types.BlobInfo - // LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). 
- // The Digest field is guaranteed to be provided; Size may be -1. - // WARNING: The list may contain duplicates, and they are semantically relevant. - LayerInfos() []LayerInfo - // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) - UpdateLayerInfos(layerInfos []types.BlobInfo) error - - // ImageID computes an ID which can uniquely identify this image by its contents, irrespective - // of which (of possibly more than one simultaneously valid) reference was used to locate the - // image, and unchanged by whether or how the layers are compressed. The result takes the form - // of the hexadecimal portion of a digest.Digest. - ImageID(diffIDs []digest.Digest) (string, error) - - // Inspect returns various information for (skopeo inspect) parsed from the manifest, - // incorporating information from a configuration blob returned by configGetter, if - // the underlying image format is expected to include a configuration blob. - Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) - - // Serialize returns the manifest in a blob format. - // NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! - Serialize() ([]byte, error) -} - -// LayerInfo is an extended version of types.BlobInfo for low-level users of Manifest.LayerInfos. -type LayerInfo struct { - types.BlobInfo - EmptyLayer bool // The layer is an “empty”/“throwaway” one, and may or may not be physically represented in various transport / storage systems. false if the manifest type does not have the concept. -} - -// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized. -// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest, -// but we may not have such metadata available (e.g. when the manifest is a local file). -func GuessMIMEType(manifestBlob []byte) string { - return manifest.GuessMIMEType(manifestBlob) -} - -// Digest returns the a digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures. -func Digest(manifestBlob []byte) (digest.Digest, error) { - return manifest.Digest(manifestBlob) -} - -// MatchesDigest returns true iff the manifest matches expectedDigest. -// Error may be set if this returns false. -// Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified, -// or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob. -func MatchesDigest(manifestBlob []byte, expectedDigest digest.Digest) (bool, error) { - return manifest.MatchesDigest(manifestBlob, expectedDigest) -} - -// AddDummyV2S1Signature adds an JWS signature with a temporary key (i.e. useless) to a v2s1 manifest. -// This is useful to make the manifest acceptable to a docker/distribution registry (even though nothing needs or wants the JWS signature). -func AddDummyV2S1Signature(manifest []byte) ([]byte, error) { - key, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - return nil, err // Coverage: This can fail only if rand.Reader fails. - } - - js, err := libtrust.NewJSONSignature(manifest) - if err != nil { - return nil, err - } - if err := js.Sign(key); err != nil { // Coverage: This can fail basically only if rand.Reader fails. 
- return nil, err - } - return js.PrettySignature("signatures") -} - -// MIMETypeIsMultiImage returns true if mimeType is a list of images -func MIMETypeIsMultiImage(mimeType string) bool { - return mimeType == DockerV2ListMediaType || mimeType == imgspecv1.MediaTypeImageIndex -} - -// MIMETypeSupportsEncryption returns true if the mimeType supports encryption -func MIMETypeSupportsEncryption(mimeType string) bool { - return mimeType == imgspecv1.MediaTypeImageManifest -} - -// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server, -// centralizing various workarounds. -func NormalizedMIMEType(input string) string { - return manifest.NormalizedMIMEType(input) -} - -// FromBlob returns a Manifest instance for the specified manifest blob and the corresponding MIME type -func FromBlob(manblob []byte, mt string) (Manifest, error) { - nmt := NormalizedMIMEType(mt) - switch nmt { - case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType: - return Schema1FromManifest(manblob) - case imgspecv1.MediaTypeImageManifest: - return OCI1FromManifest(manblob) - case DockerV2Schema2MediaType: - return Schema2FromManifest(manblob) - case DockerV2ListMediaType, imgspecv1.MediaTypeImageIndex: - return nil, fmt.Errorf("Treating manifest lists as individual manifests is not implemented") - } - // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values. - return nil, fmt.Errorf("Unimplemented manifest MIME type %q (normalized as %q)", mt, nmt) -} diff --git a/vendor/github.com/containers/image/v5/manifest/oci.go b/vendor/github.com/containers/image/v5/manifest/oci.go deleted file mode 100644 index a18425d0e5..0000000000 --- a/vendor/github.com/containers/image/v5/manifest/oci.go +++ /dev/null @@ -1,276 +0,0 @@ -package manifest - -import ( - "encoding/json" - "fmt" - "slices" - "strings" - - "github.com/containers/image/v5/internal/manifest" - compressiontypes "github.com/containers/image/v5/pkg/compression/types" - "github.com/containers/image/v5/types" - ociencspec "github.com/containers/ocicrypt/spec" - "github.com/opencontainers/go-digest" - "github.com/opencontainers/image-spec/specs-go" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" -) - -// BlobInfoFromOCI1Descriptor returns a types.BlobInfo based on the input OCI1 descriptor. -func BlobInfoFromOCI1Descriptor(desc imgspecv1.Descriptor) types.BlobInfo { - return types.BlobInfo{ - Digest: desc.Digest, - Size: desc.Size, - URLs: desc.URLs, - Annotations: desc.Annotations, - MediaType: desc.MediaType, - } -} - -// OCI1 is a manifest.Manifest implementation for OCI images. -// The underlying data from imgspecv1.Manifest is also available. -type OCI1 struct { - imgspecv1.Manifest -} - -// SupportedOCI1MediaType checks if the specified string is a supported OCI1 -// media type. -// -// Deprecated: blindly rejecting unknown MIME types when the consumer does not -// need to process the input just reduces interoperability (and violates the -// standard) with no benefit, and that this function does not check that the -// media type is appropriate for any specific purpose, so it’s not all that -// useful for validation anyway. 
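For orientation while reviewing the removal: a minimal sketch of how a caller drives this package's abstract Manifest interface via FromBlob, assuming the post-migration go.podman.io/image/v5 import path (the file name is illustrative):

```go
package main

import (
	"fmt"
	"log"
	"os"

	"go.podman.io/image/v5/manifest"
)

func main() {
	blob, err := os.ReadFile("manifest.json") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	// Prefer an out-of-band MIME type (e.g. the registry's Content-Type
	// header) when one is available; GuessMIMEType is the fallback.
	mt := manifest.GuessMIMEType(blob)
	m, err := manifest.FromBlob(blob, mt)
	if err != nil {
		log.Fatal(err)
	}
	for _, layer := range m.LayerInfos() {
		fmt.Println(layer.Digest, layer.Size) // Size may be -1
	}
}
```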
-func SupportedOCI1MediaType(m string) error { - switch m { - case imgspecv1.MediaTypeDescriptor, imgspecv1.MediaTypeImageConfig, - imgspecv1.MediaTypeImageLayer, imgspecv1.MediaTypeImageLayerGzip, imgspecv1.MediaTypeImageLayerZstd, - imgspecv1.MediaTypeImageLayerNonDistributable, imgspecv1.MediaTypeImageLayerNonDistributableGzip, imgspecv1.MediaTypeImageLayerNonDistributableZstd, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. - imgspecv1.MediaTypeImageManifest, - imgspecv1.MediaTypeLayoutHeader, - ociencspec.MediaTypeLayerEnc, ociencspec.MediaTypeLayerGzipEnc: - return nil - default: - return fmt.Errorf("unsupported OCIv1 media type: %q", m) - } -} - -// OCI1FromManifest creates an OCI1 manifest instance from a manifest blob. -func OCI1FromManifest(manifestBlob []byte) (*OCI1, error) { - oci1 := OCI1{} - if err := json.Unmarshal(manifestBlob, &oci1); err != nil { - return nil, err - } - if err := manifest.ValidateUnambiguousManifestFormat(manifestBlob, imgspecv1.MediaTypeImageManifest, - manifest.AllowedFieldConfig|manifest.AllowedFieldLayers); err != nil { - return nil, err - } - return &oci1, nil -} - -// OCI1FromComponents creates an OCI1 manifest instance from the supplied data. -func OCI1FromComponents(config imgspecv1.Descriptor, layers []imgspecv1.Descriptor) *OCI1 { - return &OCI1{ - imgspecv1.Manifest{ - Versioned: specs.Versioned{SchemaVersion: 2}, - MediaType: imgspecv1.MediaTypeImageManifest, - Config: config, - Layers: layers, - }, - } -} - -// OCI1Clone creates a copy of the supplied OCI1 manifest. -func OCI1Clone(src *OCI1) *OCI1 { - return &OCI1{ - Manifest: src.Manifest, - } -} - -// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. -func (m *OCI1) ConfigInfo() types.BlobInfo { - return BlobInfoFromOCI1Descriptor(m.Config) -} - -// LayerInfos returns a list of LayerInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). -// The Digest field is guaranteed to be provided; Size may be -1. -// WARNING: The list may contain duplicates, and they are semantically relevant. -func (m *OCI1) LayerInfos() []LayerInfo { - blobs := make([]LayerInfo, 0, len(m.Layers)) - for _, layer := range m.Layers { - blobs = append(blobs, LayerInfo{ - BlobInfo: BlobInfoFromOCI1Descriptor(layer), - EmptyLayer: false, - }) - } - return blobs -} - -var oci1CompressionMIMETypeSets = []compressionMIMETypeSet{ - { - mtsUncompressed: imgspecv1.MediaTypeImageLayerNonDistributable, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. - compressiontypes.GzipAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableGzip, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. - compressiontypes.ZstdAlgorithmName: imgspecv1.MediaTypeImageLayerNonDistributableZstd, //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images. 
-	},
-	{
-		mtsUncompressed:                    imgspecv1.MediaTypeImageLayer,
-		compressiontypes.GzipAlgorithmName: imgspecv1.MediaTypeImageLayerGzip,
-		compressiontypes.ZstdAlgorithmName: imgspecv1.MediaTypeImageLayerZstd,
-	},
-}
-
-// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls+mediatype), in order (the root layer first, and then successive layered layers)
-// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if any of the layerInfos includes a combination of CompressionOperation and
-// CompressionAlgorithm that isn't supported by OCI.
-//
-// It’s generally the caller’s responsibility to determine whether a particular edit is acceptable, rather than relying on
-// failures of this function, because the layer is typically created _before_ UpdateLayerInfos is called (UpdateLayerInfos needs
-// to know the final digest). See OCI1.CanChangeLayerCompression for some help in determining this; other aspects like compression
-// algorithms that might not be supported by a format, or the limited set of MIME types accepted for encryption, are not currently
-// handled — that logic should eventually also be provided as OCI1 methods, not hard-coded in callers.
-func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error {
-	if len(m.Layers) != len(layerInfos) {
-		return fmt.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos))
-	}
-	original := m.Layers
-	m.Layers = make([]imgspecv1.Descriptor, len(layerInfos))
-	for i, info := range layerInfos {
-		mimeType := original[i].MediaType
-		if info.CryptoOperation == types.Decrypt {
-			decMimeType, err := getDecryptedMediaType(mimeType)
-			if err != nil {
-				return fmt.Errorf("error preparing updated manifest: decryption specified but original mediatype is not encrypted: %q", mimeType)
-			}
-			mimeType = decMimeType
-		}
-		mimeType, err := updatedMIMEType(oci1CompressionMIMETypeSets, mimeType, info)
-		if err != nil {
-			return fmt.Errorf("preparing updated manifest, layer %q: %w", info.Digest, err)
-		}
-		if info.CryptoOperation == types.Encrypt {
-			encMediaType, err := getEncryptedMediaType(mimeType)
-			if err != nil {
-				return fmt.Errorf("error preparing updated manifest: encryption specified but no counterpart for mediatype: %q", mimeType)
-			}
-			mimeType = encMediaType
-		}
-
-		m.Layers[i].MediaType = mimeType
-		m.Layers[i].Digest = info.Digest
-		m.Layers[i].Size = info.Size
-		m.Layers[i].Annotations = info.Annotations
-		m.Layers[i].URLs = info.URLs
-	}
-	return nil
-}
-
-// getEncryptedMediaType maps the mediatype to its encrypted counterpart and returns
-// an error if the mediatype does not support encryption
-func getEncryptedMediaType(mediatype string) (string, error) {
-	parts := strings.Split(mediatype, "+")
-	if slices.Contains(parts[1:], "encrypted") {
-		return "", fmt.Errorf("unsupported mediaType: %q already encrypted", mediatype)
-	}
-	unsuffixedMediatype := parts[0]
-	switch unsuffixedMediatype {
-	case DockerV2Schema2LayerMediaType, imgspecv1.MediaTypeImageLayer,
-		imgspecv1.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // NonDistributable layers are deprecated, but we want to continue to support manipulating pre-existing images.
-		return mediatype + "+encrypted", nil
-	}
-
-	return "", fmt.Errorf("unsupported mediaType to encrypt: %q", mediatype)
-}
-
-// getDecryptedMediaType maps the mediatype to its decrypted counterpart and returns
-// an error if the mediatype does not support decryption
-func getDecryptedMediaType(mediatype string) (string, error) {
-	res, ok := strings.CutSuffix(mediatype, "+encrypted")
-	if !ok {
-		return "", fmt.Errorf("unsupported mediaType to decrypt: %q", mediatype)
-	}
-
-	return res, nil
-}
-
-// Serialize returns the manifest in a blob format.
-// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made!
-func (m *OCI1) Serialize() ([]byte, error) {
-	return json.Marshal(*m)
-}
-
-// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
-func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) {
-	if m.Config.MediaType != imgspecv1.MediaTypeImageConfig {
-		// We could return at least the layers, but that’s already available in a better format via types.Image.LayerInfos.
-		// Most software calling this without human intervention is going to expect the values to be realistic and relevant,
-		// and is probably better served by failing; we can always re-visit that later if we fail now, but
-		// if we started returning some data for OCI artifacts now, we couldn’t start failing in this function later.
-		return nil, manifest.NewNonImageArtifactError(&m.Manifest)
-	}
-
-	config, err := configGetter(m.ConfigInfo())
-	if err != nil {
-		return nil, err
-	}
-	v1 := &imgspecv1.Image{}
-	if err := json.Unmarshal(config, v1); err != nil {
-		return nil, err
-	}
-	d1 := &Schema2V1Image{}
-	if err := json.Unmarshal(config, d1); err != nil {
-		return nil, err
-	}
-	layerInfos := m.LayerInfos()
-	i := &types.ImageInspectInfo{
-		Tag:           "",
-		Created:       v1.Created,
-		DockerVersion: d1.DockerVersion,
-		Labels:        v1.Config.Labels,
-		Architecture:  v1.Architecture,
-		Variant:       v1.Variant,
-		Os:            v1.OS,
-		Layers:        layerInfosToStrings(layerInfos),
-		LayersData:    imgInspectLayersFromLayerInfos(layerInfos),
-		Env:           v1.Config.Env,
-		Author:        v1.Author,
-	}
-	return i, nil
-}
-
-// ImageID computes an ID which can uniquely identify this image by its contents.
-func (m *OCI1) ImageID(diffIDs []digest.Digest) (string, error) {
-	// The way m.Config.Digest “uniquely identifies” an image is
-	// by containing RootFS.DiffIDs, which identify the layers of the image.
-	// For non-image artifacts, we can’t expect the config to change
-	// any time the other layers (semantically) change, so this approach of
-	// distinguishing objects only by m.Config.Digest doesn’t work in general.
-	//
-	// Any caller of this method presumably wants to disambiguate the same
-	// images with a different representation, but doesn’t want to disambiguate
-	// representations (by using a manifest digest). So, submitting a non-image
-	// artifact to such a caller indicates an expectation mismatch.
-	// So, we just fail here instead of inventing some other ID value (e.g.
-	// by combining the config and blob layer digests). That still
-	// gives us the option to not fail, and return some value, in the future,
-	// without committing to that approach now.
-	// (The only known caller of ImageID is storage/storageImageDestination.computeID,
-	// which can’t work with non-image artifacts.)
- if m.Config.MediaType != imgspecv1.MediaTypeImageConfig { - return "", manifest.NewNonImageArtifactError(&m.Manifest) - } - - if err := m.Config.Digest.Validate(); err != nil { - return "", err - } - return m.Config.Digest.Encoded(), nil -} - -// CanChangeLayerCompression returns true if we can compress/decompress layers with mimeType in the current image -// (and the code can handle that). -// NOTE: Even if this returns true, the relevant format might not accept all compression algorithms; the set of accepted -// algorithms depends not on the current format, but possibly on the target of a conversion. -func (m *OCI1) CanChangeLayerCompression(mimeType string) bool { - if m.Config.MediaType != imgspecv1.MediaTypeImageConfig { - return false - } - return compressionVariantsRecognizeMIMEType(oci1CompressionMIMETypeSets, mimeType) -} diff --git a/vendor/github.com/containers/image/v5/manifest/oci_index.go b/vendor/github.com/containers/image/v5/manifest/oci_index.go deleted file mode 100644 index 193b08935a..0000000000 --- a/vendor/github.com/containers/image/v5/manifest/oci_index.go +++ /dev/null @@ -1,27 +0,0 @@ -package manifest - -import ( - "github.com/containers/image/v5/internal/manifest" - imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" -) - -// OCI1Index is just an alias for the OCI index type, but one which we can -// provide methods for. -type OCI1Index = manifest.OCI1IndexPublic - -// OCI1IndexFromComponents creates an OCI1 image index instance from the -// supplied data. -func OCI1IndexFromComponents(components []imgspecv1.Descriptor, annotations map[string]string) *OCI1Index { - return manifest.OCI1IndexPublicFromComponents(components, annotations) -} - -// OCI1IndexClone creates a deep copy of the passed-in index. -func OCI1IndexClone(index *OCI1Index) *OCI1Index { - return manifest.OCI1IndexPublicClone(index) -} - -// OCI1IndexFromManifest creates an OCI1 manifest index instance from marshalled -// JSON, presumably generated by encoding a OCI1 manifest index. -func OCI1IndexFromManifest(manifestBlob []byte) (*OCI1Index, error) { - return manifest.OCI1IndexPublicFromManifest(manifestBlob) -} diff --git a/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go b/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go deleted file mode 100644 index 9a2219e795..0000000000 --- a/vendor/github.com/containers/image/v5/pkg/blobinfocache/none/none.go +++ /dev/null @@ -1,63 +0,0 @@ -// Package none implements a dummy BlobInfoCache which records no data. -package none - -import ( - "github.com/containers/image/v5/internal/blobinfocache" - "github.com/containers/image/v5/types" - "github.com/opencontainers/go-digest" -) - -// noCache implements a dummy BlobInfoCache which records no data. -type noCache struct { -} - -// NoCache implements BlobInfoCache by not recording any data. -// -// This exists primarily for implementations of configGetter for -// Manifest.Inspect, because configs only have one representation. -// Any use of BlobInfoCache with blobs should usually use at least a -// short-lived cache, ideally blobinfocache.DefaultCache. -var NoCache blobinfocache.BlobInfoCache2 = blobinfocache.FromBlobInfoCache(&noCache{}) - -// UncompressedDigest returns an uncompressed digest corresponding to anyDigest. -// May return anyDigest if it is known to be uncompressed. -// Returns "" if nothing is known about the digest (it may be compressed or uncompressed). 
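The comment above names the intended use for NoCache: a configGetter for Manifest.Inspect. A sketch of that shape, assuming the post-migration go.podman.io/image/v5 paths and the types.ImageSource.GetBlob signature:

```go
package example

import (
	"context"
	"io"

	"go.podman.io/image/v5/pkg/blobinfocache/none"
	"go.podman.io/image/v5/types"
)

// configGetterFor returns a callback with the shape Manifest.Inspect expects.
// Configs have exactly one representation, so the data-free none.NoCache is
// an appropriate BlobInfoCache here.
func configGetterFor(ctx context.Context, src types.ImageSource) func(types.BlobInfo) ([]byte, error) {
	return func(info types.BlobInfo) ([]byte, error) {
		rc, _, err := src.GetBlob(ctx, info, none.NoCache)
		if err != nil {
			return nil, err
		}
		defer rc.Close()
		return io.ReadAll(rc)
	}
}
```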
-func (noCache) UncompressedDigest(anyDigest digest.Digest) digest.Digest {
-	return ""
-}
-
-// RecordDigestUncompressedPair records that uncompressed is the uncompressed version of anyDigest.
-// It’s allowed for anyDigest == uncompressed.
-// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
-// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
-// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
-func (noCache) RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest) {
-}
-
-// UncompressedDigestForTOC returns an uncompressed digest corresponding to tocDigest.
-// Returns "" if the uncompressed digest is unknown.
-func (noCache) UncompressedDigestForTOC(tocDigest digest.Digest) digest.Digest {
-	return ""
-}
-
-// RecordTOCUncompressedPair records that the tocDigest corresponds to uncompressed.
-// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
-// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
-// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
-func (noCache) RecordTOCUncompressedPair(tocDigest digest.Digest, uncompressed digest.Digest) {
-}
-
-// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
-// and can be reused given the opaque location data.
-func (noCache) RecordKnownLocation(transport types.ImageTransport, scope types.BICTransportScope, blobDigest digest.Digest, location types.BICLocationReference) {
-}
-
-// CandidateLocations returns a prioritized, limited number of blobs and their locations that could possibly be reused
-// within the specified (transport, scope) (if they still exist, which is not guaranteed).
-//
-// If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute,
-// data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same
-// uncompressed digest.
-func (noCache) CandidateLocations(transport types.ImageTransport, scope types.BICTransportScope, digest digest.Digest, canSubstitute bool) []types.BICReplacementCandidate {
-	return nil
-}
diff --git a/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go b/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go
deleted file mode 100644
index e715705b43..0000000000
--- a/vendor/github.com/containers/image/v5/pkg/compression/internal/types.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package internal
-
-import "io"
-
-// CompressorFunc writes the compressed stream to the given writer using the specified compression level.
-//
-// Compressing a stream may create integrity data that allows consuming the compressed byte stream
-// while only using subsets of the compressed data (if the compressed data is seekable and most
-// of the uncompressed data is already present via other means), while still protecting integrity
-// of the compressed stream against unwanted modification. (In OCI container images, this metadata
-// is usually carried in manifest annotations.)
-// -// If the compression generates such metadata, it is written to the provided metadata map. -// -// The caller must call Close() on the stream (even if the input stream does not need closing!). -type CompressorFunc func(io.Writer, map[string]string, *int) (io.WriteCloser, error) - -// DecompressorFunc returns the decompressed stream, given a compressed stream. -// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!). -type DecompressorFunc func(io.Reader) (io.ReadCloser, error) - -// Algorithm is a compression algorithm that can be used for CompressStream. -type Algorithm struct { - name string - baseVariantName string - prefix []byte // Initial bytes of a stream compressed using this algorithm, or empty to disable detection. - decompressor DecompressorFunc - compressor CompressorFunc -} - -// NewAlgorithm creates an Algorithm instance. -// nontrivialBaseVariantName is typically "". -// This function exists so that Algorithm instances can only be created by code that -// is allowed to import this internal subpackage. -func NewAlgorithm(name, nontrivialBaseVariantName string, prefix []byte, decompressor DecompressorFunc, compressor CompressorFunc) Algorithm { - baseVariantName := name - if nontrivialBaseVariantName != "" { - baseVariantName = nontrivialBaseVariantName - } - return Algorithm{ - name: name, - baseVariantName: baseVariantName, - prefix: prefix, - decompressor: decompressor, - compressor: compressor, - } -} - -// Name returns the name for the compression algorithm. -func (c Algorithm) Name() string { - return c.name -} - -// BaseVariantName returns the name of the “base variant” of the compression algorithm. -// It is either equal to Name() of the same algorithm, or equal to Name() of some other Algorithm (the “base variant”). -// This supports a single level of “is-a” relationship between compression algorithms, e.g. where "zstd:chunked" data is valid "zstd" data. -func (c Algorithm) BaseVariantName() string { - return c.baseVariantName -} - -// AlgorithmCompressor returns the compressor field of algo. -// This is a function instead of a public method so that it is only callable by code -// that is allowed to import this internal subpackage. -func AlgorithmCompressor(algo Algorithm) CompressorFunc { - return algo.compressor -} - -// AlgorithmDecompressor returns the decompressor field of algo. -// This is a function instead of a public method so that it is only callable by code -// that is allowed to import this internal subpackage. -func AlgorithmDecompressor(algo Algorithm) DecompressorFunc { - return algo.decompressor -} - -// AlgorithmPrefix returns the prefix field of algo. -// This is a function instead of a public method so that it is only callable by code -// that is allowed to import this internal subpackage. -func AlgorithmPrefix(algo Algorithm) []byte { - return algo.prefix -} diff --git a/vendor/github.com/containers/image/v5/pkg/compression/types/types.go b/vendor/github.com/containers/image/v5/pkg/compression/types/types.go deleted file mode 100644 index 43d03b601c..0000000000 --- a/vendor/github.com/containers/image/v5/pkg/compression/types/types.go +++ /dev/null @@ -1,41 +0,0 @@ -package types - -import ( - "github.com/containers/image/v5/pkg/compression/internal" -) - -// DecompressorFunc returns the decompressed stream, given a compressed stream. -// The caller must call Close() on the decompressed stream (even if the compressed input stream does not need closing!). 
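These hooks are consumed by the public pkg/compression package; a sketch of typical stream handling through its AutoDecompress helper (an assumption here, since that helper is not shown in this diff), under the new import path:

```go
package main

import (
	"fmt"
	"io"
	"log"
	"os"

	"go.podman.io/image/v5/pkg/compression"
)

func main() {
	f, err := os.Open("layer.tar.gz") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// AutoDecompress sniffs the stream prefix (the Algorithm prefix bytes
	// described above) and wraps the reader in the matching decompressor,
	// or passes it through unchanged when nothing is detected.
	rc, wasCompressed, err := compression.AutoDecompress(f)
	if err != nil {
		log.Fatal(err)
	}
	defer rc.Close()
	n, _ := io.Copy(io.Discard, rc)
	fmt.Println("compressed:", wasCompressed, "uncompressed bytes:", n)
}
```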
-type DecompressorFunc = internal.DecompressorFunc - -// Algorithm is a compression algorithm provided and supported by pkg/compression. -// It can’t be supplied from the outside. -type Algorithm = internal.Algorithm - -const ( - // GzipAlgorithmName is the name used by pkg/compression.Gzip. - // NOTE: Importing only this /types package does not inherently guarantee a Gzip algorithm - // will actually be available. (In fact it is intended for this types package not to depend - // on any of the implementations.) - GzipAlgorithmName = "gzip" - // Bzip2AlgorithmName is the name used by pkg/compression.Bzip2. - // NOTE: Importing only this /types package does not inherently guarantee a Bzip2 algorithm - // will actually be available. (In fact it is intended for this types package not to depend - // on any of the implementations.) - Bzip2AlgorithmName = "bzip2" - // XzAlgorithmName is the name used by pkg/compression.Xz. - // NOTE: Importing only this /types package does not inherently guarantee a Xz algorithm - // will actually be available. (In fact it is intended for this types package not to depend - // on any of the implementations.) - XzAlgorithmName = "Xz" - // ZstdAlgorithmName is the name used by pkg/compression.Zstd. - // NOTE: Importing only this /types package does not inherently guarantee a Zstd algorithm - // will actually be available. (In fact it is intended for this types package not to depend - // on any of the implementations.) - ZstdAlgorithmName = "zstd" - // ZstdChunkedAlgorithmName is the name used by pkg/compression.ZstdChunked. - // NOTE: Importing only this /types package does not inherently guarantee a ZstdChunked algorithm - // will actually be available. (In fact it is intended for this types package not to depend - // on any of the implementations.) 
- ZstdChunkedAlgorithmName = "zstd:chunked" -) diff --git a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go b/vendor/github.com/containers/image/v5/pkg/docker/config/config.go deleted file mode 100644 index 243b13c88a..0000000000 --- a/vendor/github.com/containers/image/v5/pkg/docker/config/config.go +++ /dev/null @@ -1,950 +0,0 @@ -package config - -import ( - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io/fs" - "iter" - "maps" - "os" - "os/exec" - "path/filepath" - "runtime" - "strings" - - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/internal/multierr" - "github.com/containers/image/v5/internal/set" - "github.com/containers/image/v5/pkg/sysregistriesv2" - "github.com/containers/image/v5/types" - "github.com/containers/storage/pkg/fileutils" - "github.com/containers/storage/pkg/homedir" - "github.com/containers/storage/pkg/ioutils" - helperclient "github.com/docker/docker-credential-helpers/client" - "github.com/docker/docker-credential-helpers/credentials" - "github.com/sirupsen/logrus" -) - -type dockerAuthConfig struct { - Auth string `json:"auth,omitempty"` - IdentityToken string `json:"identitytoken,omitempty"` -} - -type dockerConfigFile struct { - AuthConfigs map[string]dockerAuthConfig `json:"auths"` - CredHelpers map[string]string `json:"credHelpers,omitempty"` -} - -var ( - defaultPerUIDPathFormat = filepath.FromSlash("/run/containers/%d/auth.json") - xdgConfigHomePath = filepath.FromSlash("containers/auth.json") - xdgRuntimeDirPath = filepath.FromSlash("containers/auth.json") - dockerHomePath = filepath.FromSlash(".docker/config.json") - dockerLegacyHomePath = ".dockercfg" - nonLinuxAuthFilePath = filepath.FromSlash(".config/containers/auth.json") - - // ErrNotLoggedIn is returned for users not logged into a registry - // that they are trying to logout of - ErrNotLoggedIn = errors.New("not logged in") - // ErrNotSupported is returned for unsupported methods - ErrNotSupported = errors.New("not supported") -) - -// authPath combines a path to a file with container registry credentials, -// along with expected properties of that path (currently just whether it's -// legacy format or not). -type authPath struct { - path string - legacyFormat bool -} - -// newAuthPathDefault constructs an authPath in non-legacy format. -func newAuthPathDefault(path string) authPath { - return authPath{path: path, legacyFormat: false} -} - -// GetAllCredentials returns the registry credentials for all registries stored -// in any of the configured credential helpers. -func GetAllCredentials(sys *types.SystemContext) (map[string]types.DockerAuthConfig, error) { - // To keep things simple, let's first extract all registries from all - // possible sources, and then call `GetCredentials` on them. That - // prevents us from having to reverse engineer the logic in - // `GetCredentials`. - allKeys := set.New[string]() - - // To use GetCredentials, we must at least convert the URL forms into host names. - // While we're at it, we’ll also canonicalize docker.io to the standard format. - normalizedDockerIORegistry := normalizeRegistry("docker.io") - - helpers, err := sysregistriesv2.CredentialHelpers(sys) - if err != nil { - return nil, err - } - for _, helper := range helpers { - switch helper { - // Special-case the built-in helper for auth files. - case sysregistriesv2.AuthenticationFileHelper: - for _, path := range getAuthFilePaths(sys, homedir.Get()) { - // parse returns an empty map in case the path doesn't exist. 
-			fileContents, err := path.parse()
-			if err != nil {
-				return nil, fmt.Errorf("reading JSON file %q: %w", path.path, err)
-			}
-			// Credential helpers in the auth file have a
-			// direct mapping to a registry, so we can just
-			// walk the map.
-			allKeys.AddSeq(maps.Keys(fileContents.CredHelpers))
-			for key := range fileContents.AuthConfigs {
-				key := normalizeAuthFileKey(key, path.legacyFormat)
-				if key == normalizedDockerIORegistry {
-					key = "docker.io"
-				}
-				allKeys.Add(key)
-			}
-		}
-	// External helpers.
-	default:
-		creds, err := listCredsInCredHelper(helper)
-		if err != nil {
-			logrus.Debugf("Error listing credentials stored in credential helper %s: %v", helper, err)
-			if errors.Is(err, exec.ErrNotFound) {
-				creds = nil // It's okay if the helper doesn't exist.
-			} else {
-				return nil, err
-			}
-		}
-		allKeys.AddSeq(maps.Keys(creds))
-	}
-	}
-
-	// Now use `GetCredentials` to get the specific auth configs for each
-	// previously listed registry.
-	allCreds := make(map[string]types.DockerAuthConfig)
-	for key := range allKeys.All() {
-		creds, err := GetCredentials(sys, key)
-		if err != nil {
-			// Note: we rely on the logging in `GetCredentials`.
-			return nil, err
-		}
-		if creds != (types.DockerAuthConfig{}) {
-			allCreds[key] = creds
-		}
-	}
-
-	return allCreds, nil
-}
-
-// getAuthFilePaths returns a slice of authPaths based on the system context
-// in the order they should be searched. Note that some paths may not exist.
-// The homeDir parameter should always be homedir.Get(), and is only intended to be overridden
-// by tests.
-func getAuthFilePaths(sys *types.SystemContext, homeDir string) []authPath {
-	paths := []authPath{}
-	pathToAuth, userSpecifiedPath, err := getPathToAuth(sys)
-	if err == nil {
-		paths = append(paths, pathToAuth)
-	} else {
-		// Error means that the path set for XDG_RUNTIME_DIR does not exist,
-		// but we don't want to completely fail in the case that the user is pulling a public image;
-		// log the error as a warning instead and move on to pulling the image.
-		logrus.Warnf("%v: Trying to pull image in the event that it is a public image.", err)
-	}
-	if !userSpecifiedPath {
-		xdgCfgHome := os.Getenv("XDG_CONFIG_HOME")
-		if xdgCfgHome == "" {
-			xdgCfgHome = filepath.Join(homeDir, ".config")
-		}
-		paths = append(paths, newAuthPathDefault(filepath.Join(xdgCfgHome, xdgConfigHomePath)))
-		if dockerConfig := os.Getenv("DOCKER_CONFIG"); dockerConfig != "" {
-			paths = append(paths, newAuthPathDefault(filepath.Join(dockerConfig, "config.json")))
-		} else {
-			paths = append(paths,
-				newAuthPathDefault(filepath.Join(homeDir, dockerHomePath)),
-			)
-		}
-		paths = append(paths,
-			authPath{path: filepath.Join(homeDir, dockerLegacyHomePath), legacyFormat: true},
-		)
-	}
-	return paths
-}
-
-// GetCredentials returns the registry credentials matching key, appropriate for
-// sys and the users’ configuration.
-// If an entry is not found, an empty struct is returned.
-// A valid key is a repository, a namespace within a registry, or a registry hostname.
-//
-// GetCredentialsForRef should almost always be used in favor of this API.
-func GetCredentials(sys *types.SystemContext, key string) (types.DockerAuthConfig, error) {
-	return getCredentialsWithHomeDir(sys, key, homedir.Get())
-}
-
-// GetCredentialsForRef returns the registry credentials necessary for
-// accessing ref on the registry ref points to,
-// appropriate for sys and the users’ configuration.
-// If an entry is not found, an empty struct is returned.
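A usage sketch for the lookup entry points above, assuming the post-migration import paths (the image name and nil SystemContext are illustrative):

```go
package main

import (
	"fmt"
	"log"

	"go.podman.io/image/v5/docker/reference"
	"go.podman.io/image/v5/pkg/docker/config"
	"go.podman.io/image/v5/types"
)

func main() {
	ref, err := reference.ParseNormalizedNamed("quay.io/podman/hello") // illustrative image
	if err != nil {
		log.Fatal(err)
	}
	// A nil SystemContext uses the default auth-file search order built above.
	creds, err := config.GetCredentialsForRef(nil, ref)
	if err != nil {
		log.Fatal(err)
	}
	if creds == (types.DockerAuthConfig{}) {
		fmt.Println("no stored credentials; access will be anonymous")
		return
	}
	fmt.Println("found credentials for", reference.Domain(ref))
}
```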
-func GetCredentialsForRef(sys *types.SystemContext, ref reference.Named) (types.DockerAuthConfig, error) {
-	return getCredentialsWithHomeDir(sys, ref.Name(), homedir.Get())
-}
-
-// getCredentialsWithHomeDir is an internal implementation detail of
-// GetCredentialsForRef and GetCredentials. It exists only to allow testing it
-// with an artificial home directory.
-func getCredentialsWithHomeDir(sys *types.SystemContext, key, homeDir string) (types.DockerAuthConfig, error) {
-	_, err := validateKey(key)
-	if err != nil {
-		return types.DockerAuthConfig{}, err
-	}
-
-	if sys != nil && sys.DockerAuthConfig != nil {
-		logrus.Debugf("Returning credentials for %s from DockerAuthConfig", key)
-		return *sys.DockerAuthConfig, nil
-	}
-
-	var registry string // We compute this once because it is used in several places.
-	if firstSlash := strings.IndexRune(key, '/'); firstSlash != -1 {
-		registry = key[:firstSlash]
-	} else {
-		registry = key
-	}
-
-	// Anonymous function to query credentials from auth files.
-	getCredentialsFromAuthFiles := func() (types.DockerAuthConfig, string, error) {
-		for _, path := range getAuthFilePaths(sys, homeDir) {
-			creds, err := findCredentialsInFile(key, registry, path)
-			if err != nil {
-				return types.DockerAuthConfig{}, "", err
-			}
-
-			if creds != (types.DockerAuthConfig{}) {
-				return creds, path.path, nil
-			}
-		}
-		return types.DockerAuthConfig{}, "", nil
-	}
-
-	helpers, err := sysregistriesv2.CredentialHelpers(sys)
-	if err != nil {
-		return types.DockerAuthConfig{}, err
-	}
-
-	var multiErr []error
-	for _, helper := range helpers {
-		var (
-			creds          types.DockerAuthConfig
-			helperKey      string
-			credHelperPath string
-			err            error
-		)
-		switch helper {
-		// Special-case the built-in helper for auth files.
-		case sysregistriesv2.AuthenticationFileHelper:
-			helperKey = key
-			creds, credHelperPath, err = getCredentialsFromAuthFiles()
-		// External helpers.
-		default:
-			// This intentionally uses "registry", not "key"; we don't support namespaced
-			// credentials in helpers, but a "registry" is a valid parent of "key".
-			helperKey = registry
-			creds, err = getCredsFromCredHelper(helper, registry)
-		}
-		if err != nil {
-			logrus.Debugf("Error looking up credentials for %s in credential helper %s: %v", helperKey, helper, err)
-			multiErr = append(multiErr, err)
-			continue
-		}
-		if creds != (types.DockerAuthConfig{}) {
-			msg := fmt.Sprintf("Found credentials for %s in credential helper %s", helperKey, helper)
-			if credHelperPath != "" {
-				msg = fmt.Sprintf("%s in file %s", msg, credHelperPath)
-			}
-			logrus.Debug(msg)
-			return creds, nil
-		}
-	}
-	if multiErr != nil {
-		return types.DockerAuthConfig{}, multierr.Format("errors looking up credentials:\n\t* ", "\n\t* ", "\n", multiErr)
-	}
-
-	logrus.Debugf("No credentials for %s found", key)
-	return types.DockerAuthConfig{}, nil
-}
-
-// GetAuthentication returns the registry credentials matching key, appropriate for
-// sys and the users’ configuration.
-// If an entry is not found, an empty struct is returned.
-// A valid key is a repository, a namespace within a registry, or a registry hostname.
-//
-// Deprecated: This API only has support for username and password. To get the
-// support for oauth2 in container registry authentication, we added the new
-// GetCredentialsForRef and GetCredentials API. The new API should be used and this API is kept to
-// maintain backward compatibility.
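The corresponding store/remove flow, again a sketch with placeholder values, mirroring what `podman login` and `podman logout` do through this package:

```go
package main

import (
	"errors"
	"fmt"
	"log"

	"go.podman.io/image/v5/pkg/docker/config"
)

func main() {
	// Stores the credentials in the first configured helper (or the auth file).
	desc, err := config.SetCredentials(nil, "registry.example.com", "user", "secret")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("credentials written to:", desc)

	// The logout side; ErrNotLoggedIn signals there was nothing to remove.
	if err := config.RemoveAuthentication(nil, "registry.example.com"); err != nil {
		if errors.Is(err, config.ErrNotLoggedIn) {
			fmt.Println("nothing stored for that registry")
		} else {
			log.Fatal(err)
		}
	}
}
```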
-func GetAuthentication(sys *types.SystemContext, key string) (string, string, error) { - return getAuthenticationWithHomeDir(sys, key, homedir.Get()) -} - -// getAuthenticationWithHomeDir is an internal implementation detail of GetAuthentication, -// it exists only to allow testing it with an artificial home directory. -func getAuthenticationWithHomeDir(sys *types.SystemContext, key, homeDir string) (string, string, error) { - creds, err := getCredentialsWithHomeDir(sys, key, homeDir) - if err != nil { - return "", "", err - } - if creds.IdentityToken != "" { - return "", "", fmt.Errorf("non-empty identity token found and this API doesn't support it: %w", ErrNotSupported) - } - return creds.Username, creds.Password, nil -} - -// SetCredentials stores the username and password in a location -// appropriate for sys and the users’ configuration. -// A valid key is a repository, a namespace within a registry, or a registry hostname; -// using forms other than just a registry may fail depending on configuration. -// Returns a human-readable description of the location that was updated. -// NOTE: The return value is only intended to be read by humans; its form is not an API, -// it may change (or new forms can be added) any time. -func SetCredentials(sys *types.SystemContext, key, username, password string) (string, error) { - helpers, jsonEditor, key, isNamespaced, err := prepareForEdit(sys, key, true) - if err != nil { - return "", err - } - - // Make sure to collect all errors. - var multiErr []error - for _, helper := range helpers { - var desc string - var err error - switch helper { - // Special-case the built-in helpers for auth files. - case sysregistriesv2.AuthenticationFileHelper: - desc, err = jsonEditor(sys, func(fileContents *dockerConfigFile) (bool, string, error) { - if ch, exists := fileContents.CredHelpers[key]; exists { - if isNamespaced { - return false, "", unsupportedNamespaceErr(ch) - } - desc, err := setCredsInCredHelper(ch, key, username, password) - if err != nil { - return false, "", err - } - return false, desc, nil - } - creds := base64.StdEncoding.EncodeToString([]byte(username + ":" + password)) - newCreds := dockerAuthConfig{Auth: creds} - fileContents.AuthConfigs[key] = newCreds - return true, "", nil - }) - // External helpers. - default: - if isNamespaced { - err = unsupportedNamespaceErr(helper) - } else { - desc, err = setCredsInCredHelper(helper, key, username, password) - } - } - if err != nil { - multiErr = append(multiErr, err) - logrus.Debugf("Error storing credentials for %s in credential helper %s: %v", key, helper, err) - continue - } - logrus.Debugf("Stored credentials for %s in credential helper %s", key, helper) - return desc, nil - } - return "", multierr.Format("Errors storing credentials\n\t* ", "\n\t* ", "\n", multiErr) -} - -func unsupportedNamespaceErr(helper string) error { - return fmt.Errorf("namespaced key is not supported for credential helper %s", helper) -} - -// SetAuthentication stores the username and password in the credential helper or file -// See the documentation of SetCredentials for format of "key" -func SetAuthentication(sys *types.SystemContext, key, username, password string) error { - _, err := SetCredentials(sys, key, username, password) - return err -} - -// RemoveAuthentication removes credentials for `key` from all possible -// sources such as credential helpers and auth files. 
-// A valid key is a repository, a namespace within a registry, or a registry hostname; -// using forms other than just a registry may fail depending on configuration. -func RemoveAuthentication(sys *types.SystemContext, key string) error { - helpers, jsonEditor, key, isNamespaced, err := prepareForEdit(sys, key, true) - if err != nil { - return err - } - - isLoggedIn := false - - removeFromCredHelper := func(helper string) error { - if isNamespaced { - logrus.Debugf("Not removing credentials because namespaced keys are not supported for the credential helper: %s", helper) - return nil - } - err := deleteCredsFromCredHelper(helper, key) - if err == nil { - logrus.Debugf("Credentials for %q were deleted from credential helper %s", key, helper) - isLoggedIn = true - return nil - } - if credentials.IsErrCredentialsNotFoundMessage(err.Error()) { - logrus.Debugf("Not logged in to %s with credential helper %s", key, helper) - return nil - } - return fmt.Errorf("removing credentials for %s from credential helper %s: %w", key, helper, err) - } - - var multiErr []error - for _, helper := range helpers { - var err error - switch helper { - // Special-case the built-in helper for auth files. - case sysregistriesv2.AuthenticationFileHelper: - _, err = jsonEditor(sys, func(fileContents *dockerConfigFile) (bool, string, error) { - var helperErr error - if innerHelper, exists := fileContents.CredHelpers[key]; exists { - helperErr = removeFromCredHelper(innerHelper) - } - if _, ok := fileContents.AuthConfigs[key]; ok { - isLoggedIn = true - delete(fileContents.AuthConfigs, key) - } - return true, "", helperErr - }) - if err != nil { - multiErr = append(multiErr, err) - } - // External helpers. - default: - if err := removeFromCredHelper(helper); err != nil { - multiErr = append(multiErr, err) - } - } - } - - if multiErr != nil { - return multierr.Format("errors removing credentials\n\t* ", "\n\t*", "\n", multiErr) - } - if !isLoggedIn { - return ErrNotLoggedIn - } - - return nil -} - -// RemoveAllAuthentication deletes all the credentials stored in credential -// helpers and auth files. -func RemoveAllAuthentication(sys *types.SystemContext) error { - helpers, jsonEditor, _, _, err := prepareForEdit(sys, "", false) - if err != nil { - return err - } - - var multiErr []error - for _, helper := range helpers { - var err error - switch helper { - // Special-case the built-in helper for auth files. - case sysregistriesv2.AuthenticationFileHelper: - _, err = jsonEditor(sys, func(fileContents *dockerConfigFile) (bool, string, error) { - for registry, helper := range fileContents.CredHelpers { - // Helpers in auth files are expected - // to exist, so no special treatment - // for them. - if err := deleteCredsFromCredHelper(helper, registry); err != nil { - return false, "", err - } - } - fileContents.CredHelpers = make(map[string]string) - fileContents.AuthConfigs = make(map[string]dockerAuthConfig) - return true, "", nil - }) - // External helpers. - default: - var creds map[string]string - creds, err = listCredsInCredHelper(helper) - if err != nil { - if errors.Is(err, exec.ErrNotFound) { - // It's okay if the helper doesn't exist. 
- continue - } else { - break - } - } - for registry := range creds { - err = deleteCredsFromCredHelper(helper, registry) - if err != nil { - break - } - } - } - if err != nil { - logrus.Debugf("Error removing credentials from credential helper %s: %v", helper, err) - multiErr = append(multiErr, err) - continue - } - logrus.Debugf("All credentials removed from credential helper %s", helper) - } - - if multiErr != nil { - return multierr.Format("errors removing all credentials:\n\t* ", "\n\t* ", "\n", multiErr) - } - return nil -} - -// prepareForEdit processes sys and key (if keyRelevant) to return: -// - a list of credential helpers -// - a function which can be used to edit the JSON file -// - the key value to actually use in credential helpers / JSON -// - a boolean which is true if key is namespaced (and should not be used with credential helpers). -func prepareForEdit(sys *types.SystemContext, key string, keyRelevant bool) ([]string, func(*types.SystemContext, func(*dockerConfigFile) (bool, string, error)) (string, error), string, bool, error) { - var isNamespaced bool - if keyRelevant { - ns, err := validateKey(key) - if err != nil { - return nil, nil, "", false, err - } - isNamespaced = ns - } - - if sys != nil && sys.DockerCompatAuthFilePath != "" { - if sys.AuthFilePath != "" { - return nil, nil, "", false, errors.New("AuthFilePath and DockerCompatAuthFilePath can not be set simultaneously") - } - if keyRelevant { - if isNamespaced { - return nil, nil, "", false, fmt.Errorf("Credentials cannot be recorded in Docker-compatible format with namespaced key %q", key) - } - if key == "docker.io" { - key = "https://index.docker.io/v1/" - } - } - - // Do not use helpers defined in sysregistriesv2 because Docker isn’t aware of them. - return []string{sysregistriesv2.AuthenticationFileHelper}, modifyDockerConfigJSON, key, false, nil - } - - helpers, err := sysregistriesv2.CredentialHelpers(sys) - if err != nil { - return nil, nil, "", false, err - } - - return helpers, modifyJSON, key, isNamespaced, nil -} - -func listCredsInCredHelper(credHelper string) (map[string]string, error) { - helperName := fmt.Sprintf("docker-credential-%s", credHelper) - p := helperclient.NewShellProgramFunc(helperName) - return helperclient.List(p) -} - -// getPathToAuth gets the path of the auth.json file used for reading and writing credentials, -// and a boolean indicating whether the return value came from an explicit user choice (i.e. not defaults) -func getPathToAuth(sys *types.SystemContext) (authPath, bool, error) { - return getPathToAuthWithOS(sys, runtime.GOOS) -} - -// getPathToAuthWithOS is an internal implementation detail of getPathToAuth, -// it exists only to allow testing it with an artificial runtime.GOOS. -func getPathToAuthWithOS(sys *types.SystemContext, goOS string) (authPath, bool, error) { - if sys != nil { - if sys.AuthFilePath != "" && sys.DockerCompatAuthFilePath != "" { - return authPath{}, false, errors.New("AuthFilePath and DockerCompatAuthFilePath can not be set simultaneously") - } - if sys.AuthFilePath != "" { - return newAuthPathDefault(sys.AuthFilePath), true, nil - } - // When reading, we can process auth.json and Docker’s config.json with the same code. - // When writing, prepareForEdit chooses an appropriate jsonEditor implementation. 
- if sys.DockerCompatAuthFilePath != "" { - return newAuthPathDefault(sys.DockerCompatAuthFilePath), true, nil - } - if sys.LegacyFormatAuthFilePath != "" { - return authPath{path: sys.LegacyFormatAuthFilePath, legacyFormat: true}, true, nil - } - // Note: RootForImplicitAbsolutePaths should not affect paths starting with $HOME - if sys.RootForImplicitAbsolutePaths != "" && goOS == "linux" { - return newAuthPathDefault(filepath.Join(sys.RootForImplicitAbsolutePaths, fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid()))), false, nil - } - } - if goOS != "linux" { - return newAuthPathDefault(filepath.Join(homedir.Get(), nonLinuxAuthFilePath)), false, nil - } - - runtimeDir := os.Getenv("XDG_RUNTIME_DIR") - if runtimeDir != "" { - // This function does not in general need to separately check that the returned path exists; that’s racy, and callers will fail accessing the file anyway. - // We are checking for fs.ErrNotExist here only to give the user better guidance what to do in this special case. - err := fileutils.Exists(runtimeDir) - if errors.Is(err, fs.ErrNotExist) { - // This means the user set the XDG_RUNTIME_DIR variable and either forgot to create the directory - // or made a typo while setting the environment variable, - // so return an error referring to $XDG_RUNTIME_DIR instead of xdgRuntimeDirPath inside. - return authPath{}, false, fmt.Errorf("%q directory set by $XDG_RUNTIME_DIR does not exist. Either create the directory or unset $XDG_RUNTIME_DIR.: %w", runtimeDir, err) - } // else ignore err and let the caller fail accessing xdgRuntimeDirPath. - return newAuthPathDefault(filepath.Join(runtimeDir, xdgRuntimeDirPath)), false, nil - } - return newAuthPathDefault(fmt.Sprintf(defaultPerUIDPathFormat, os.Getuid())), false, nil -} - -// parse unmarshals the credentials stored in the auth.json file and returns it -// or returns an empty dockerConfigFile data structure if auth.json does not exist -// if the file exists and is empty, this function returns an error. -func (path authPath) parse() (dockerConfigFile, error) { - var fileContents dockerConfigFile - - raw, err := os.ReadFile(path.path) - if err != nil { - if os.IsNotExist(err) { - fileContents.AuthConfigs = map[string]dockerAuthConfig{} - return fileContents, nil - } - return dockerConfigFile{}, err - } - - if path.legacyFormat { - if err = json.Unmarshal(raw, &fileContents.AuthConfigs); err != nil { - return dockerConfigFile{}, fmt.Errorf("unmarshaling JSON at %q: %w", path.path, err) - } - return fileContents, nil - } - - if err = json.Unmarshal(raw, &fileContents); err != nil { - return dockerConfigFile{}, fmt.Errorf("unmarshaling JSON at %q: %w", path.path, err) - } - - if fileContents.AuthConfigs == nil { - fileContents.AuthConfigs = map[string]dockerAuthConfig{} - } - if fileContents.CredHelpers == nil { - fileContents.CredHelpers = make(map[string]string) - } - - return fileContents, nil -} - -// modifyJSON finds an auth.json file, calls editor on the contents, and -// writes it back if editor returns true. -// Returns a human-readable description of the file, to be returned by SetCredentials. -// -// The editor may also return a human-readable description of the updated location; if it is "", -// the file itself is used. 
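A condensed, self-contained rendering of the read-edit-write pattern modifyJSON implements below (the function name is hypothetical; see the comments for what the vendored code does differently):

```go
package example

import (
	"encoding/json"
	"errors"
	"io/fs"
	"os"
	"path/filepath"
)

// editJSONFile condenses the modifyJSON pattern: parse the existing file
// (tolerating absence), let an editor mutate the contents, and rewrite only
// when the editor reports a change. The real code additionally writes through
// ioutils.AtomicWriteFile so a crash never leaves a half-written auth file.
func editJSONFile(path string, editor func(m map[string]any) (bool, error)) error {
	contents := map[string]any{}
	raw, err := os.ReadFile(path)
	switch {
	case err == nil:
		if err := json.Unmarshal(raw, &contents); err != nil {
			return err
		}
	case !errors.Is(err, fs.ErrNotExist):
		return err
	}
	updated, err := editor(contents)
	if err != nil || !updated {
		return err
	}
	if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil {
		return err
	}
	newData, err := json.MarshalIndent(contents, "", "\t")
	if err != nil {
		return err
	}
	return os.WriteFile(path, newData, 0o600) // non-atomic, unlike the original
}
```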
-func modifyJSON(sys *types.SystemContext, editor func(fileContents *dockerConfigFile) (bool, string, error)) (string, error) { - path, _, err := getPathToAuth(sys) - if err != nil { - return "", err - } - if path.legacyFormat { - return "", fmt.Errorf("writes to %s using legacy format are not supported", path.path) - } - - dir := filepath.Dir(path.path) - if err = os.MkdirAll(dir, 0700); err != nil { - return "", err - } - - fileContents, err := path.parse() - if err != nil { - return "", fmt.Errorf("reading JSON file %q: %w", path.path, err) - } - - updated, description, err := editor(&fileContents) - if err != nil { - return "", fmt.Errorf("updating %q: %w", path.path, err) - } - if updated { - newData, err := json.MarshalIndent(fileContents, "", "\t") - if err != nil { - return "", fmt.Errorf("marshaling JSON %q: %w", path.path, err) - } - - if err = ioutils.AtomicWriteFile(path.path, newData, 0600); err != nil { - return "", fmt.Errorf("writing to file %q: %w", path.path, err) - } - } - - if description == "" { - description = path.path - } - return description, nil -} - -// modifyDockerConfigJSON finds a docker config.json file, calls editor on the contents, and -// writes it back if editor returns true. -// Returns a human-readable description of the file, to be returned by SetCredentials. -// -// The editor may also return a human-readable description of the updated location; if it is "", -// the file itself is used. -func modifyDockerConfigJSON(sys *types.SystemContext, editor func(fileContents *dockerConfigFile) (bool, string, error)) (string, error) { - if sys == nil || sys.DockerCompatAuthFilePath == "" { - return "", errors.New("internal error: modifyDockerConfigJSON called with DockerCompatAuthFilePath not set") - } - path := sys.DockerCompatAuthFilePath - - dir := filepath.Dir(path) - if err := os.MkdirAll(dir, 0700); err != nil { - return "", err - } - - // Try hard not to clobber fields we don’t understand, even fields which may be added in future Docker versions. - var rawContents map[string]json.RawMessage - originalBytes, err := os.ReadFile(path) - switch { - case err == nil: - if err := json.Unmarshal(originalBytes, &rawContents); err != nil { - return "", fmt.Errorf("unmarshaling JSON at %q: %w", path, err) - } - case errors.Is(err, fs.ErrNotExist): - rawContents = map[string]json.RawMessage{} - default: // err != nil - return "", err - } - - syntheticContents := dockerConfigFile{ - AuthConfigs: map[string]dockerAuthConfig{}, - CredHelpers: map[string]string{}, - } - // json.Unmarshal also falls back to case-insensitive field matching; this code does not do that. Presumably - // config.json is mostly maintained by machines doing `docker login`, so the files should, hopefully, not contain field names with - // unexpected case. - if rawAuths, ok := rawContents["auths"]; ok { - // This conversion will lose fields we don’t know about; when updating an entry, we can’t tell whether an unknown field - // should be preserved or discarded (because it is made obsolete/unwanted with the new credentials). - // It might make sense to track which entries of "auths" we actually modified, and to not touch any others. 
- if err := json.Unmarshal(rawAuths, &syntheticContents.AuthConfigs); err != nil { - return "", fmt.Errorf(`unmarshaling "auths" in JSON at %q: %w`, path, err) - } - } - if rawCH, ok := rawContents["credHelpers"]; ok { - if err := json.Unmarshal(rawCH, &syntheticContents.CredHelpers); err != nil { - return "", fmt.Errorf(`unmarshaling "credHelpers" in JSON at %q: %w`, path, err) - - } - } - - updated, description, err := editor(&syntheticContents) - if err != nil { - return "", fmt.Errorf("updating %q: %w", path, err) - } - if updated { - rawAuths, err := json.MarshalIndent(syntheticContents.AuthConfigs, "", "\t") - if err != nil { - return "", fmt.Errorf("marshaling JSON %q: %w", path, err) - } - rawContents["auths"] = rawAuths - // We never modify syntheticContents.CredHelpers, so we don’t need to update it. - newData, err := json.MarshalIndent(rawContents, "", "\t") - if err != nil { - return "", fmt.Errorf("marshaling JSON %q: %w", path, err) - } - - if err = ioutils.AtomicWriteFile(path, newData, 0600); err != nil { - return "", fmt.Errorf("writing to file %q: %w", path, err) - } - } - - if description == "" { - description = path - } - return description, nil -} - -func getCredsFromCredHelper(credHelper, registry string) (types.DockerAuthConfig, error) { - helperName := fmt.Sprintf("docker-credential-%s", credHelper) - p := helperclient.NewShellProgramFunc(helperName) - creds, err := helperclient.Get(p, registry) - if err != nil { - if credentials.IsErrCredentialsNotFoundMessage(err.Error()) { - logrus.Debugf("Not logged in to %s with credential helper %s", registry, credHelper) - err = nil - } - return types.DockerAuthConfig{}, err - } - - switch creds.Username { - case "": - return types.DockerAuthConfig{ - IdentityToken: creds.Secret, - }, nil - default: - return types.DockerAuthConfig{ - Username: creds.Username, - Password: creds.Secret, - }, nil - } -} - -// setCredsInCredHelper stores (username, password) for registry in credHelper. -// Returns a human-readable description of the destination, to be returned by SetCredentials. -func setCredsInCredHelper(credHelper, registry, username, password string) (string, error) { - helperName := fmt.Sprintf("docker-credential-%s", credHelper) - p := helperclient.NewShellProgramFunc(helperName) - creds := &credentials.Credentials{ - ServerURL: registry, - Username: username, - Secret: password, - } - if err := helperclient.Store(p, creds); err != nil { - return "", err - } - return fmt.Sprintf("credential helper: %s", credHelper), nil -} - -func deleteCredsFromCredHelper(credHelper, registry string) error { - helperName := fmt.Sprintf("docker-credential-%s", credHelper) - p := helperclient.NewShellProgramFunc(helperName) - return helperclient.Erase(p, registry) -} - -// findCredentialsInFile looks for credentials matching "key" -// (which is "registry" or a namespace in "registry") in "path". -func findCredentialsInFile(key, registry string, path authPath) (types.DockerAuthConfig, error) { - fileContents, err := path.parse() - if err != nil { - return types.DockerAuthConfig{}, fmt.Errorf("reading JSON file %q: %w", path.path, err) - } - - // First try cred helpers. They should always be normalized. - // This intentionally uses "registry", not "key"; we don't support namespaced - // credentials in helpers. 
-	if ch, exists := fileContents.CredHelpers[registry]; exists {
-		logrus.Debugf("Looking up in credential helper %s based on credHelpers entry in %s", ch, path.path)
-		return getCredsFromCredHelper(ch, registry)
-	}
-
-	// Support sub-registry namespaces in auth.
-	// (This is not a feature of ~/.docker/config.json; we support it even for
-	// those files as an extension.)
-	//
-	// Repo or namespace keys are only supported as exact matches. For registry
-	// keys we prefer exact matches as well.
-	for key := range authKeyLookupOrder(key, registry, path.legacyFormat) {
-		if val, exists := fileContents.AuthConfigs[key]; exists {
-			return decodeDockerAuth(path.path, key, val)
-		}
-	}
-
-	// bad luck; let's normalize the entries first
-	// This primarily happens for legacyFormat, which for a time used API URLs
-	// (http[s:]//…/v1/) as keys.
-	// Secondarily, (docker login) accepted URLs with no normalization for
-	// several years, and matched registry hostnames against that, so support
-	// those entries even in non-legacyFormat ~/.docker/config.json.
-	// The docker.io registry still uses the /v1/ key with a special host name,
-	// so account for that as well.
-	registry = normalizeRegistry(registry)
-	for k, v := range fileContents.AuthConfigs {
-		if normalizeAuthFileKey(k, path.legacyFormat) == registry {
-			return decodeDockerAuth(path.path, k, v)
-		}
-	}
-
-	// Only log this if we found nothing; getCredentialsWithHomeDir logs the
-	// source of found data.
-	logrus.Debugf("No credentials matching %s found in %s", key, path.path)
-	return types.DockerAuthConfig{}, nil
-}
-
-// authKeyLookupOrder returns a sequence for lookup keys matching (key or registry)
-// in file with legacyFormat, in order from the best match to worst.
-// For example, in a non-legacy file,
-// when given a repository key "quay.io/repo/ns/image", it returns
-// - quay.io/repo/ns/image
-// - quay.io/repo/ns
-// - quay.io/repo
-// - quay.io
-func authKeyLookupOrder(key, registry string, legacyFormat bool) iter.Seq[string] {
-	return func(yield func(string) bool) {
-		if legacyFormat {
-			_ = yield(registry) // We stop in any case
-			return
-		}
-
-		for {
-			if !yield(key) {
-				return
-			}
-
-			lastSlash := strings.LastIndex(key, "/")
-			if lastSlash == -1 {
-				break
-			}
-			key = key[:lastSlash]
-		}
-	}
-}
-
-// decodeDockerAuth decodes the username and password from conf,
-// which is the entry for key in path.
-func decodeDockerAuth(path, key string, conf dockerAuthConfig) (types.DockerAuthConfig, error) {
-	decoded, err := base64.StdEncoding.DecodeString(conf.Auth)
-	if err != nil {
-		return types.DockerAuthConfig{}, err
-	}
-
-	user, passwordPart, valid := strings.Cut(string(decoded), ":")
-	if !valid {
-		// if it's invalid just skip, as docker does
-		if len(decoded) > 0 { // Docker writes "auths": { "$host": {} } entries if a credential helper is used, don’t warn about those
-			logrus.Warnf(`Error parsing the "auth" field of a credential entry %q in %q, missing colon`, key, path) // Don’t include the text of decoded, because that might put secrets into a log.
-		} else {
-			logrus.Debugf("Found an empty credential entry %q in %q (an unhandled credential helper marker?), moving on", key, path)
-		}
-		return types.DockerAuthConfig{}, nil
-	}
-
-	password := strings.Trim(passwordPart, "\x00")
-	return types.DockerAuthConfig{
-		Username:      user,
-		Password:      password,
-		IdentityToken: conf.IdentityToken,
-	}, nil
-}
-
-// normalizeAuthFileKey takes a key, converts it to a host name and normalizes
-// the resulting registry.
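The lookup order documented above, reproduced as a standalone sketch for quick verification (lookupOrder is an illustrative name; it materializes the non-legacy sequence as a slice, most-specific key first, bare host last):

```go
package main

import (
	"fmt"
	"strings"
)

// lookupOrder mirrors the non-legacy branch of authKeyLookupOrder.
func lookupOrder(key string) []string {
	keys := []string{key}
	for {
		i := strings.LastIndex(key, "/")
		if i == -1 {
			break
		}
		key = key[:i]
		keys = append(keys, key)
	}
	return keys
}

func main() {
	fmt.Println(lookupOrder("quay.io/repo/ns/image"))
	// Output: [quay.io/repo/ns/image quay.io/repo/ns quay.io/repo quay.io]
}
```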
-func normalizeAuthFileKey(key string, legacyFormat bool) string {
-	stripped := strings.TrimPrefix(key, "http://")
-	stripped = strings.TrimPrefix(stripped, "https://")
-
-	if legacyFormat || stripped != key {
-		stripped, _, _ = strings.Cut(stripped, "/")
-	}
-
-	return normalizeRegistry(stripped)
-}
-
-// normalizeRegistry maps the known docker.io host aliases to the canonical
-// index.docker.io name; any other registry is returned unchanged.
-func normalizeRegistry(registry string) string {
-	switch registry {
-	case "registry-1.docker.io", "docker.io":
-		return "index.docker.io"
-	}
-	return registry
-}
-
-// validateKey verifies that the input key does not have a disallowed prefix
-// and reports whether the key is namespaced.
-func validateKey(key string) (bool, error) {
-	if strings.HasPrefix(key, "http://") || strings.HasPrefix(key, "https://") {
-		return false, fmt.Errorf("key %s contains http[s]:// prefix", key)
-	}
-
-	// Ideally this should only accept explicitly valid keys, compare
-	// validateIdentityRemappingPrefix. For now, just reject values that look
-	// like tagged or digested values.
-	if strings.ContainsRune(key, '@') {
-		return false, fmt.Errorf(`key %s contains a '@' character`, key)
-	}
-
-	firstSlash := strings.IndexRune(key, '/')
-	isNamespaced := firstSlash != -1
-	// Reject host/repo:tag, but allow localhost:5000 and localhost:5000/foo.
-	if isNamespaced && strings.ContainsRune(key[firstSlash+1:], ':') {
-		return false, fmt.Errorf(`key %s contains a ':' character after host[:port]`, key)
-	}
-	// Check whether the provided key contains one or more subpaths.
-	return isNamespaced, nil
-}
diff --git a/vendor/github.com/containers/image/v5/pkg/strslice/README.md b/vendor/github.com/containers/image/v5/pkg/strslice/README.md
deleted file mode 100644
index ae6097e82e..0000000000
--- a/vendor/github.com/containers/image/v5/pkg/strslice/README.md
+++ /dev/null
@@ -1 +0,0 @@
-This package was replicated from [github.com/docker/docker v17.04.0-ce](https://github.com/docker/docker/tree/v17.04.0-ce/api/types/strslice).
diff --git a/vendor/github.com/containers/image/v5/pkg/strslice/strslice.go b/vendor/github.com/containers/image/v5/pkg/strslice/strslice.go
deleted file mode 100644
index bad493fb89..0000000000
--- a/vendor/github.com/containers/image/v5/pkg/strslice/strslice.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package strslice
-
-import "encoding/json"
-
-// StrSlice represents a string or an array of strings.
-// We need to override the json decoder to accept both options.
-type StrSlice []string
-
-// UnmarshalJSON decodes the byte slice whether it's a string or an array of
-// strings. This method is needed to implement json.Unmarshaler.
-func (e *StrSlice) UnmarshalJSON(b []byte) error {
-	if len(b) == 0 {
-		// With no input, we preserve the existing value by returning nil and
-		// leaving the target alone. This allows defining default values for
-		// the type.
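-		// (Illustrative examples, assuming the decoding below: both
-		// `"cmd"` and `["cmd", "arg"]` unmarshal successfully, into
-		// ["cmd"] and ["cmd", "arg"] respectively.)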
- return nil - } - - p := make([]string, 0, 1) - if err := json.Unmarshal(b, &p); err != nil { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - p = append(p, s) - } - - *e = p - return nil -} diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_common.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_common.go deleted file mode 100644 index c9e8ac5cbd..0000000000 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_common.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build !freebsd - -package sysregistriesv2 - -// builtinRegistriesConfPath is the path to the registry configuration file. -// DO NOT change this, instead see systemRegistriesConfPath above. -const builtinRegistriesConfPath = "/etc/containers/registries.conf" - -// builtinRegistriesConfDirPath is the path to the registry configuration directory. -// DO NOT change this, instead see systemRegistriesConfDirectoryPath above. -const builtinRegistriesConfDirPath = "/etc/containers/registries.conf.d" diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_freebsd.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_freebsd.go deleted file mode 100644 index 7dada4b779..0000000000 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/paths_freebsd.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build freebsd - -package sysregistriesv2 - -// builtinRegistriesConfPath is the path to the registry configuration file. -// DO NOT change this, instead see systemRegistriesConfPath above. -const builtinRegistriesConfPath = "/usr/local/etc/containers/registries.conf" - -// builtinRegistriesConfDirPath is the path to the registry configuration directory. -// DO NOT change this, instead see systemRegistriesConfDirectoryPath above. -const builtinRegistriesConfDirPath = "/usr/local/etc/containers/registries.conf.d" diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go deleted file mode 100644 index 677629c5db..0000000000 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/shortnames.go +++ /dev/null @@ -1,353 +0,0 @@ -package sysregistriesv2 - -import ( - "fmt" - "maps" - "os" - "path/filepath" - "reflect" - "strings" - - "github.com/BurntSushi/toml" - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/internal/multierr" - "github.com/containers/image/v5/internal/rootless" - "github.com/containers/image/v5/types" - "github.com/containers/storage/pkg/homedir" - "github.com/containers/storage/pkg/lockfile" - "github.com/sirupsen/logrus" -) - -// defaultShortNameMode is the default mode of registries.conf files if the -// corresponding field is left empty. -const defaultShortNameMode = types.ShortNameModePermissive - -// userShortNamesFile is the user-specific config file to store aliases. -var userShortNamesFile = filepath.FromSlash("containers/short-name-aliases.conf") - -// shortNameAliasesConfPath returns the path to the machine-generated -// short-name-aliases.conf file. 
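-// (Typically this resolves to /var/cache/containers/short-name-aliases.conf
-// for root, or $XDG_CACHE_HOME/containers/short-name-aliases.conf for
-// rootless users; illustrative defaults, assuming no SystemContext override.)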
-func shortNameAliasesConfPath(ctx *types.SystemContext) (string, error) {
-	if ctx != nil && len(ctx.UserShortNameAliasConfPath) > 0 {
-		return ctx.UserShortNameAliasConfPath, nil
-	}
-
-	if rootless.GetRootlessEUID() == 0 {
-		// Root user or in a non-conforming user NS
-		return filepath.Join("/var/cache", userShortNamesFile), nil
-	}
-
-	// Rootless user
-	cacheRoot, err := homedir.GetCacheHome()
-	if err != nil {
-		return "", err
-	}
-
-	return filepath.Join(cacheRoot, userShortNamesFile), nil
-}
-
-// shortNameAliasConf is a subset of the `V2RegistriesConf` format. It's used in the
-// software-maintained `userShortNamesFile`.
type shortNameAliasConf struct {
-	// A map for aliasing short names to their fully-qualified image
-	// reference counterparts.
-	// Note that Aliases is set to nil after being loaded from a file.
-	Aliases map[string]string `toml:"aliases"`
-
-	// If you add any field, make sure to update nonempty() below.
-}
-
-// nonempty returns true if config contains at least one configuration entry.
-func (c *shortNameAliasConf) nonempty() bool {
-	copy := *c // A shallow copy
-	if copy.Aliases != nil && len(copy.Aliases) == 0 {
-		copy.Aliases = nil
-	}
-	return !reflect.DeepEqual(copy, shortNameAliasConf{})
-}
-
-// alias combines the parsed value of an alias with the config file it has been
-// specified in. Recording the config file improves the user experience, helping
-// users track down the source of potential pull errors.
-type alias struct {
-	// The parsed value of an alias. May be nil if set to "" in a config.
-	value reference.Named
-	// The config file the alias originates from.
-	configOrigin string
-}
-
-// shortNameAliasCache is the result of parsing shortNameAliasConf,
-// pre-processed for faster usage.
-type shortNameAliasCache struct {
-	// Note that an alias value may be nil iff it's set as an empty string
-	// in the config.
-	namedAliases map[string]alias
-}
-
-// ResolveShortNameAlias performs an alias resolution of the specified name.
-// The user-specific short-name-aliases.conf has precedence over aliases in the
-// assembled registries.conf. It returns the possibly resolved alias or nil, a
-// human-readable description of the config where the alias is specified, and
-// an error. The origin of the config file is recorded to improve the user
-// experience, helping users track down the source of potential pull errors.
-// Almost all callers should use pkg/shortnames instead.
-//
-// Note that it’s the caller’s responsibility to pass only a repository
-// (reference.IsNameOnly) as the short name.
-func ResolveShortNameAlias(ctx *types.SystemContext, name string) (reference.Named, string, error) {
-	if err := validateShortName(name); err != nil {
-		return nil, "", err
-	}
-	confPath, lock, err := shortNameAliasesConfPathAndLock(ctx)
-	if err != nil {
-		return nil, "", err
-	}
-
-	// Acquire the lock as a reader to allow for multiple routines in the
-	// same process space to read simultaneously.
-	lock.RLock()
-	defer lock.Unlock()
-
-	_, aliasCache, err := loadShortNameAliasConf(confPath)
-	if err != nil {
-		return nil, "", err
-	}
-
-	// First look up the short-name-aliases.conf. Note that a value may be
-	// nil iff it's set as an empty string in the config.
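-	// (For example, an assumed entry "fedora" = "registry.fedoraproject.org/fedora"
-	// in short-name-aliases.conf wins over any "fedora" alias coming from the
-	// assembled registries.conf.)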
-	alias, resolved := aliasCache.namedAliases[name]
-	if resolved {
-		return alias.value, alias.configOrigin, nil
-	}
-
-	config, err := getConfig(ctx)
-	if err != nil {
-		return nil, "", err
-	}
-	alias, resolved = config.aliasCache.namedAliases[name]
-	if resolved {
-		return alias.value, alias.configOrigin, nil
-	}
-	return nil, "", nil
-}
-
-// editShortNameAlias loads the aliases.conf file and changes it. If value is
-// set, it adds the name-value pair as a new alias. Otherwise, it removes
-// name from the config.
-func editShortNameAlias(ctx *types.SystemContext, name string, value *string) (retErr error) {
-	if err := validateShortName(name); err != nil {
-		return err
-	}
-	if value != nil {
-		if _, err := parseShortNameValue(*value); err != nil {
-			return err
-		}
-	}
-
-	confPath, lock, err := shortNameAliasesConfPathAndLock(ctx)
-	if err != nil {
-		return err
-	}
-
-	// Acquire the lock as a writer to prevent data corruption.
-	lock.Lock()
-	defer lock.Unlock()
-
-	// Load the short-name-alias.conf, add the specified name-value pair,
-	// and write it back to the file.
-	conf, _, err := loadShortNameAliasConf(confPath)
-	if err != nil {
-		return err
-	}
-
-	if conf.Aliases == nil { // Ensure we have a map to update.
-		conf.Aliases = make(map[string]string)
-	}
-	if value != nil {
-		conf.Aliases[name] = *value
-	} else {
-		// If the name does not exist, return an error.
-		if _, exists := conf.Aliases[name]; !exists {
-			return fmt.Errorf("short-name alias %q not found in %q: please check registries.conf files", name, confPath)
-		}
-
-		delete(conf.Aliases, name)
-	}
-
-	f, err := os.OpenFile(confPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
-	if err != nil {
-		return err
-	}
-	// Since we are writing to this file, make sure we handle err on Close().
-	defer func() {
-		closeErr := f.Close()
-		if retErr == nil {
-			retErr = closeErr
-		}
-	}()
-
-	encoder := toml.NewEncoder(f)
-	return encoder.Encode(conf)
-}
-
-// AddShortNameAlias adds the specified name-value pair as a new alias to the
-// user-specific aliases.conf. It may override an existing alias for `name`.
-//
-// Note that it’s the caller’s responsibility to pass only a repository
-// (reference.IsNameOnly) as the short name.
-func AddShortNameAlias(ctx *types.SystemContext, name string, value string) error {
-	return editShortNameAlias(ctx, name, &value)
-}
-
-// RemoveShortNameAlias clears the alias for the specified name. It returns an
-// error if name does not exist in the machine-generated
-// short-name-alias.conf. In that case, the alias must be specified in one of
-// the registries.conf files, which is the user's responsibility.
-//
-// Note that it’s the caller’s responsibility to pass only a repository
-// (reference.IsNameOnly) as the short name.
-func RemoveShortNameAlias(ctx *types.SystemContext, name string) error {
-	return editShortNameAlias(ctx, name, nil)
-}
-
-// parseShortNameValue parses the specified alias into a reference.Named. The alias is
-// expected to not be tagged or carry a digest and *must* include a
-// domain/registry.
-//
-// Note that the returned reference is always normalized.
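-// For example (illustrative values): "docker.io/alpine" is accepted and
-// normalized to "docker.io/library/alpine", while "alpine" (no registry),
-// "quay.io/busybox:latest" (tagged) and "quay.io/busybox@sha256:…" (digested)
-// are rejected.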
-func parseShortNameValue(alias string) (reference.Named, error) {
-	ref, err := reference.Parse(alias)
-	if err != nil {
-		return nil, fmt.Errorf("parsing alias %q: %w", alias, err)
-	}
-
-	if _, ok := ref.(reference.Digested); ok {
-		return nil, fmt.Errorf("invalid alias %q: must not contain digest", alias)
-	}
-
-	if _, ok := ref.(reference.Tagged); ok {
-		return nil, fmt.Errorf("invalid alias %q: must not contain tag", alias)
-	}
-
-	named, ok := ref.(reference.Named)
-	if !ok {
-		return nil, fmt.Errorf("invalid alias %q: must contain registry and repository", alias)
-	}
-
-	registry := reference.Domain(named)
-	if !strings.ContainsAny(registry, ".:") && registry != "localhost" {
-		return nil, fmt.Errorf("invalid alias %q: must contain registry and repository", alias)
-	}
-
-	// A final parse to make sure that docker.io references are correctly
-	// normalized (e.g., docker.io/alpine to docker.io/library/alpine).
-	named, err = reference.ParseNormalizedNamed(alias)
-	return named, err
-}
-
-// validateShortName parses the specified `name` of an alias (i.e., the left-hand
-// side) and checks that it's a short name and does not include a tag or digest.
-func validateShortName(name string) error {
-	repo, err := reference.Parse(name)
-	if err != nil {
-		return fmt.Errorf("cannot parse short name: %q: %w", name, err)
-	}
-
-	if _, ok := repo.(reference.Digested); ok {
-		return fmt.Errorf("invalid short name %q: must not contain digest", name)
-	}
-
-	if _, ok := repo.(reference.Tagged); ok {
-		return fmt.Errorf("invalid short name %q: must not contain tag", name)
-	}
-
-	named, ok := repo.(reference.Named)
-	if !ok {
-		return fmt.Errorf("invalid short name %q: no name", name)
-	}
-
-	registry := reference.Domain(named)
-	if strings.ContainsAny(registry, ".:") || registry == "localhost" {
-		return fmt.Errorf("invalid short name %q: must not contain registry", name)
-	}
-	return nil
-}
-
-// newShortNameAliasCache parses shortNameAliasConf and returns the corresponding internal
-// representation.
-func newShortNameAliasCache(path string, conf *shortNameAliasConf) (*shortNameAliasCache, error) {
-	res := shortNameAliasCache{
-		namedAliases: make(map[string]alias),
-	}
-	errs := []error{}
-	for name, value := range conf.Aliases {
-		if err := validateShortName(name); err != nil {
-			errs = append(errs, err)
-		}
-
-		// Empty right-hand side values in config files allow resetting
-		// an alias from a previously loaded config. This way, drop-in
-		// config files from registries.conf.d can reset potentially
-		// misconfigured aliases.
-		if value == "" {
-			res.namedAliases[name] = alias{nil, path}
-			continue
-		}
-
-		named, err := parseShortNameValue(value)
-		if err != nil {
-			// We want to report *all* malformed entries to avoid a
-			// whack-a-mole for the user.
-			errs = append(errs, err)
-		} else {
-			res.namedAliases[name] = alias{named, path}
-		}
-	}
-	if len(errs) > 0 {
-		return nil, multierr.Format("", "\n", "", errs)
-	}
-	return &res, nil
-}
-
-// updateWithConfigurationFrom updates c with configuration from updates.
-// In case of conflict, updates is preferred.
-func (c *shortNameAliasCache) updateWithConfigurationFrom(updates *shortNameAliasCache) {
-	maps.Copy(c.namedAliases, updates.namedAliases)
-}
-
-func loadShortNameAliasConf(confPath string) (*shortNameAliasConf, *shortNameAliasCache, error) {
-	conf := shortNameAliasConf{}
-
-	meta, err := toml.DecodeFile(confPath, &conf)
-	if err != nil && !os.IsNotExist(err) {
-		// It's okay if the config doesn't exist. Other errors are not.
- return nil, nil, fmt.Errorf("loading short-name aliases config file %q: %w", confPath, err) - } - if keys := meta.Undecoded(); len(keys) > 0 { - logrus.Debugf("Failed to decode keys %q from %q", keys, confPath) - } - - // Even if we don’t always need the cache, doing so validates the machine-generated config. The - // file could still be corrupted by another process or user. - cache, err := newShortNameAliasCache(confPath, &conf) - if err != nil { - return nil, nil, fmt.Errorf("loading short-name aliases config file %q: %w", confPath, err) - } - - return &conf, cache, nil -} - -func shortNameAliasesConfPathAndLock(ctx *types.SystemContext) (string, *lockfile.LockFile, error) { - shortNameAliasesConfPath, err := shortNameAliasesConfPath(ctx) - if err != nil { - return "", nil, err - } - // Make sure the path to file exists. - if err := os.MkdirAll(filepath.Dir(shortNameAliasesConfPath), 0700); err != nil { - return "", nil, err - } - - lockPath := shortNameAliasesConfPath + ".lock" - locker, err := lockfile.GetLockFile(lockPath) - return shortNameAliasesConfPath, locker, err -} diff --git a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go b/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go deleted file mode 100644 index 318988f054..0000000000 --- a/vendor/github.com/containers/image/v5/pkg/sysregistriesv2/system_registries_v2.go +++ /dev/null @@ -1,1083 +0,0 @@ -package sysregistriesv2 - -import ( - "errors" - "fmt" - "io/fs" - "maps" - "os" - "path/filepath" - "reflect" - "slices" - "sort" - "strings" - "sync" - - "github.com/BurntSushi/toml" - "github.com/containers/image/v5/docker/reference" - "github.com/containers/image/v5/types" - "github.com/containers/storage/pkg/fileutils" - "github.com/containers/storage/pkg/homedir" - "github.com/containers/storage/pkg/regexp" - "github.com/sirupsen/logrus" -) - -// systemRegistriesConfPath is the path to the system-wide registry -// configuration file and is used to add/subtract potential registries for -// obtaining images. You can override this at build time with -// -ldflags '-X github.com/containers/image/v5/sysregistries.systemRegistriesConfPath=$your_path' -var systemRegistriesConfPath = builtinRegistriesConfPath - -// systemRegistriesConfDirPath is the path to the system-wide registry -// configuration directory and is used to add/subtract potential registries for -// obtaining images. You can override this at build time with -// -ldflags '-X github.com/containers/image/v5/sysregistries.systemRegistriesConfDirectoryPath=$your_path' -var systemRegistriesConfDirPath = builtinRegistriesConfDirPath - -// AuthenticationFileHelper is a special key for credential helpers indicating -// the usage of consulting containers-auth.json files instead of a credential -// helper. -const AuthenticationFileHelper = "containers-auth.json" - -const ( - // configuration values for "pull-from-mirror" - // mirrors will be used for both digest pulls and tag pulls - MirrorAll = "all" - // mirrors will only be used for digest pulls - MirrorByDigestOnly = "digest-only" - // mirrors will only be used for tag pulls - MirrorByTagOnly = "tag-only" -) - -// Endpoint describes a remote location of a registry. -type Endpoint struct { - // The endpoint's remote location. Can be empty iff Prefix contains - // wildcard in the format: "*.example.com" for subdomain matching. - // Please refer to FindRegistry / PullSourcesFromReference instead - // of accessing/interpreting `Location` directly. 
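-	// An illustrative registries.conf fragment (assumed values) pairing a
-	// prefix with a location and one mirror:
-	//
-	//	[[registry]]
-	//	prefix = "example.com/foo"
-	//	location = "internal-registry.example.net/bar"
-	//
-	//	[[registry.mirror]]
-	//	location = "mirror.example.net/bar"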
-	Location string `toml:"location,omitempty"`
-	// If true, certs verification will be skipped and HTTP (non-TLS)
-	// connections will be allowed.
-	Insecure bool `toml:"insecure,omitempty"`
-	// PullFromMirror is used for adding restrictions to image pull through the mirror.
-	// Set to "all", "digest-only", or "tag-only".
-	// If "digest-only", mirrors will only be used for digest pulls. Pulling images by
-	// tag can potentially yield different images, depending on which endpoint
-	// we pull from. Restricting mirrors to pulls by digest avoids that issue.
-	// If "tag-only", mirrors will only be used for tag pulls. A mirror that is more
-	// up-to-date (and more expensive to keep in sync) is less likely to be out of
-	// sync if tags move, so it should not be used unnecessarily for digest references.
-	// The default is "all" (or left empty): mirrors will be used for both digest pulls
-	// and tag pulls unless mirror-by-digest-only is set for the primary registry.
-	// This can only be set in a registry's Mirror field, not in the registry's primary Endpoint.
-	// This per-mirror setting is allowed only when mirror-by-digest-only is not configured for the primary registry.
-	PullFromMirror string `toml:"pull-from-mirror,omitempty"`
-}
-
-// userRegistriesFile is the path to the per user registry configuration file.
-var userRegistriesFile = filepath.FromSlash(".config/containers/registries.conf")
-
-// userRegistriesDir is the path to the per user registry configuration directory.
-var userRegistriesDir = filepath.FromSlash(".config/containers/registries.conf.d")
-
-// rewriteReference substitutes the endpoint's `location` for the matched `prefix`
-// in `ref` and creates a new named reference from the result.
-// The function errors if the newly created reference is not parsable.
-func (e *Endpoint) rewriteReference(ref reference.Named, prefix string) (reference.Named, error) {
-	refString := ref.String()
-	var newNamedRef string
-	// refMatchingPrefix returns the length of the match. Everything that
-	// follows the match gets appended to the registry's location.
-	prefixLen := refMatchingPrefix(refString, prefix)
-	if prefixLen == -1 {
-		return nil, fmt.Errorf("invalid prefix '%v' for reference '%v'", prefix, refString)
-	}
-	// In the case of an empty `location` field, simply return the original
-	// input ref as-is.
-	//
-	// FIXME: already validated in postProcessRegistries, so check can probably
-	// be dropped.
-	// https://github.com/containers/image/pull/1191#discussion_r610621608
-	if e.Location == "" {
-		if !strings.HasPrefix(prefix, "*.") {
-			return nil, fmt.Errorf("invalid prefix '%v' for empty location, should be in the format: *.example.com", prefix)
-		}
-		return ref, nil
-	}
-	newNamedRef = e.Location + refString[prefixLen:]
-	newParsedRef, err := reference.ParseNamed(newNamedRef)
-	if err != nil {
-		return nil, fmt.Errorf("rewriting reference: %w", err)
-	}
-
-	return newParsedRef, nil
-}
-
-// Registry represents a registry.
-type Registry struct {
-	// Prefix is used for matching images, and to translate one namespace to
-	// another. If `Prefix="example.com/bar"`, `location="example.com/foo/bar"`
-	// and we pull from "example.com/bar/myimage:latest", the image will
-	// effectively be pulled from "example.com/foo/bar/myimage:latest".
-	// If no Prefix is specified, it defaults to the specified location.
-	// Prefix can also be in the format: "*.example.com" for matching
-	// subdomains.
The wildcard should only be in the beginning and should also - // not contain any namespaces or special characters: "/", "@" or ":". - // Please refer to FindRegistry / PullSourcesFromReference instead - // of accessing/interpreting `Prefix` directly. - Prefix string `toml:"prefix"` - // A registry is an Endpoint too - Endpoint - // The registry's mirrors. - Mirrors []Endpoint `toml:"mirror,omitempty"` - // If true, pulling from the registry will be blocked. - Blocked bool `toml:"blocked,omitempty"` - // If true, mirrors will only be used for digest pulls. Pulling images by - // tag can potentially yield different images, depending on which endpoint - // we pull from. Restricting mirrors to pulls by digest avoids that issue. - MirrorByDigestOnly bool `toml:"mirror-by-digest-only,omitempty"` -} - -// PullSource consists of an Endpoint and a Reference. Note that the reference is -// rewritten according to the registries prefix and the Endpoint's location. -type PullSource struct { - Endpoint Endpoint - Reference reference.Named -} - -// PullSourcesFromReference returns a slice of PullSource's based on the passed -// reference. -func (r *Registry) PullSourcesFromReference(ref reference.Named) ([]PullSource, error) { - var endpoints []Endpoint - _, isDigested := ref.(reference.Canonical) - if r.MirrorByDigestOnly { - // Only use mirrors when the reference is a digested one. - if isDigested { - endpoints = append(endpoints, r.Mirrors...) - } - } else { - for _, mirror := range r.Mirrors { - // skip the mirror if per mirror setting exists but reference does not match the restriction - switch mirror.PullFromMirror { - case MirrorByDigestOnly: - if !isDigested { - continue - } - case MirrorByTagOnly: - if isDigested { - continue - } - } - endpoints = append(endpoints, mirror) - } - } - endpoints = append(endpoints, r.Endpoint) - - sources := []PullSource{} - for _, ep := range endpoints { - rewritten, err := ep.rewriteReference(ref, r.Prefix) - if err != nil { - return nil, err - } - sources = append(sources, PullSource{Endpoint: ep, Reference: rewritten}) - } - - return sources, nil -} - -// V1TOMLregistries is for backwards compatibility to sysregistries v1 -type V1TOMLregistries struct { - Registries []string `toml:"registries"` -} - -// V1TOMLConfig is for backwards compatibility to sysregistries v1 -type V1TOMLConfig struct { - Search V1TOMLregistries `toml:"search"` - Insecure V1TOMLregistries `toml:"insecure"` - Block V1TOMLregistries `toml:"block"` -} - -// V1RegistriesConf is the sysregistries v1 configuration format. -type V1RegistriesConf struct { - V1TOMLConfig `toml:"registries"` -} - -// Nonempty returns true if config contains at least one configuration entry. -// Empty arrays are treated as missing entries. -func (config *V1RegistriesConf) Nonempty() bool { - copy := *config // A shallow copy - if copy.V1TOMLConfig.Search.Registries != nil && len(copy.V1TOMLConfig.Search.Registries) == 0 { - copy.V1TOMLConfig.Search.Registries = nil - } - if copy.V1TOMLConfig.Insecure.Registries != nil && len(copy.V1TOMLConfig.Insecure.Registries) == 0 { - copy.V1TOMLConfig.Insecure.Registries = nil - } - if copy.V1TOMLConfig.Block.Registries != nil && len(copy.V1TOMLConfig.Block.Registries) == 0 { - copy.V1TOMLConfig.Block.Registries = nil - } - return copy.hasSetField() -} - -// hasSetField returns true if config contains at least one configuration entry. 
-// This is useful because of a subtlety of the behavior of the TOML decoder, where a missing array field
-// is not modified while unmarshaling (in our case it remains nil), while an [] is unmarshaled
-// as a non-nil []string{}.
-func (config *V1RegistriesConf) hasSetField() bool {
-	return !reflect.DeepEqual(*config, V1RegistriesConf{})
-}
-
-// V2RegistriesConf is the sysregistries v2 configuration format.
-type V2RegistriesConf struct {
-	Registries []Registry `toml:"registry"`
-	// An array of host[:port] (not prefix!) entries to use for resolving unqualified image references
-	UnqualifiedSearchRegistries []string `toml:"unqualified-search-registries"`
-	// An array of global credential helpers to use for authentication
-	// (e.g., ["pass", "secretservice"]). The helpers are consulted in the
-	// specified order. Note that "containers-auth.json" is a reserved
-	// value for consulting auth files as specified in
-	// containers-auth.json(5).
-	//
-	// If empty, CredentialHelpers defaults to ["containers-auth.json"].
-	CredentialHelpers []string `toml:"credential-helpers"`
-
-	// ShortNameMode defines how short-name resolution should be handled by
-	// _consumers_ of this package. Depending on the mode, the user should
-	// be prompted with a choice of using one of the unqualified-search
-	// registries when referring to a short name.
-	//
-	// Valid modes are:
-	//  * "prompt": prompt if stdout is a TTY, otherwise use all unqualified-search registries
-	//  * "enforcing": always prompt and error if stdout is not a TTY
-	//  * "disabled": do not prompt and potentially use all unqualified-search registries
-	ShortNameMode string `toml:"short-name-mode"`
-
-	// AdditionalLayerStoreAuthHelper is a helper binary that receives
-	// registry credentials and passes them to the Additional Layer Store for
-	// registry authentication. These credentials are only collected when pulling (not pushing).
-	AdditionalLayerStoreAuthHelper string `toml:"additional-layer-store-auth-helper"`
-
-	shortNameAliasConf
-
-	// If you add any field, make sure to update Nonempty() below.
-}
-
-// Nonempty returns true if config contains at least one configuration entry.
-func (config *V2RegistriesConf) Nonempty() bool {
-	copy := *config // A shallow copy
-	if copy.Registries != nil && len(copy.Registries) == 0 {
-		copy.Registries = nil
-	}
-	if copy.UnqualifiedSearchRegistries != nil && len(copy.UnqualifiedSearchRegistries) == 0 {
-		copy.UnqualifiedSearchRegistries = nil
-	}
-	if copy.CredentialHelpers != nil && len(copy.CredentialHelpers) == 0 {
-		copy.CredentialHelpers = nil
-	}
-	if !copy.shortNameAliasConf.nonempty() {
-		copy.shortNameAliasConf = shortNameAliasConf{}
-	}
-	return copy.hasSetField()
-}
-
-// hasSetField returns true if config contains at least one configuration entry.
-// This is useful because of a subtlety of the behavior of the TOML decoder, where a missing array field
-// is not modified while unmarshaling (in our case it remains nil), while an [] is unmarshaled
-// as a non-nil []string{}.
-func (config *V2RegistriesConf) hasSetField() bool {
-	return !reflect.DeepEqual(*config, V2RegistriesConf{})
-}
-
-// parsedConfig is the result of parsing, and possibly merging, configuration files;
-// it is the boundary between the process of reading+ingesting the files, and
-// later interpreting the configuration based on caller’s requests.
-type parsedConfig struct {
-	// NOTE: Update also parsedConfig.updateWithConfigurationFrom!
-
-	// partialV2 must continue to exist to maintain the return value of TryUpdatingCache
-	// for compatibility with existing callers.
-	// We store the authoritative Registries and UnqualifiedSearchRegistries values there as well.
-	partialV2 V2RegistriesConf
-	// Absolute path to the configuration file that set the UnqualifiedSearchRegistries.
-	unqualifiedSearchRegistriesOrigin string
-	// Result of parsing of partialV2.ShortNameMode.
-	// NOTE: May be ShortNameModeInvalid to represent ShortNameMode == "" in intermediate values;
-	// the full configuration in configCache / getConfig() always contains a valid value.
-	shortNameMode types.ShortNameMode
-	aliasCache    *shortNameAliasCache
-}
-
-// InvalidRegistries represents an invalid registry configuration. An example
-// is when "registry.com" is defined multiple times in the configuration but
-// with conflicting security settings.
-type InvalidRegistries struct {
-	s string
-}
-
-// Error returns the error string.
-func (e *InvalidRegistries) Error() string {
-	return e.s
-}
-
-// parseLocation parses the input string, performs some sanity checks, and returns
-// the sanitized input string. An error is returned if the input string is
-// empty or if it contains an "http{s,}://" prefix.
-func parseLocation(input string) (string, error) {
-	trimmed := strings.TrimRight(input, "/")
-
-	// FIXME: This check needs to exist but fails for empty Location field with
-	// wildcarded prefix. Removal of this check "only" allows invalid input in,
-	// and does not prevent correct operation.
-	// https://github.com/containers/image/pull/1191#discussion_r610122617
-	//
-	// if trimmed == "" {
-	// 	return "", &InvalidRegistries{s: "invalid location: cannot be empty"}
-	// }
-	//
-
-	if strings.HasPrefix(trimmed, "http://") || strings.HasPrefix(trimmed, "https://") {
-		msg := fmt.Sprintf("invalid location '%s': URI schemes are not supported", input)
-		return "", &InvalidRegistries{s: msg}
-	}
-
-	return trimmed, nil
-}
-
-// ConvertToV2 returns a v2 config corresponding to a v1 one.
-func (config *V1RegistriesConf) ConvertToV2() (*V2RegistriesConf, error) {
-	regMap := make(map[string]*Registry)
-	// The order of the registries is not really important, but make it deterministic (the same for the same config file)
-	// to minimize behavior inconsistency and not contribute to difficult-to-reproduce situations.
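-	// For example (an assumed v1 file): registries.search = ["docker.io"] and
-	// registries.insecure = ["localhost:5000"] convert to
-	// unqualified-search-registries = ["docker.io"] plus one [[registry]]
-	// entry with location "localhost:5000" and insecure = true.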
- registryOrder := []string{} - - getRegistry := func(location string) (*Registry, error) { // Note: _pointer_ to a long-lived object - var err error - location, err = parseLocation(location) - if err != nil { - return nil, err - } - reg, exists := regMap[location] - if !exists { - reg = &Registry{ - Endpoint: Endpoint{Location: location}, - Mirrors: []Endpoint{}, - Prefix: location, - } - regMap[location] = reg - registryOrder = append(registryOrder, location) - } - return reg, nil - } - - for _, blocked := range config.V1TOMLConfig.Block.Registries { - reg, err := getRegistry(blocked) - if err != nil { - return nil, err - } - reg.Blocked = true - } - for _, insecure := range config.V1TOMLConfig.Insecure.Registries { - reg, err := getRegistry(insecure) - if err != nil { - return nil, err - } - reg.Insecure = true - } - - res := &V2RegistriesConf{ - UnqualifiedSearchRegistries: config.V1TOMLConfig.Search.Registries, - } - for _, location := range registryOrder { - reg := regMap[location] - res.Registries = append(res.Registries, *reg) - } - return res, nil -} - -// anchoredDomainRegexp is an internal implementation detail of postProcess, defining the valid values of elements of UnqualifiedSearchRegistries. -var anchoredDomainRegexp = regexp.Delayed("^" + reference.DomainRegexp.String() + "$") - -// postProcess checks the consistency of all the configuration, looks for conflicts, -// and normalizes the configuration (e.g., sets the Prefix to Location if not set). -func (config *V2RegistriesConf) postProcessRegistries() error { - regMap := make(map[string][]*Registry) - - for i := range config.Registries { - reg := &config.Registries[i] - // make sure Location and Prefix are valid - var err error - reg.Location, err = parseLocation(reg.Location) - if err != nil { - return err - } - - if reg.Prefix == "" { - if reg.Location == "" { - return &InvalidRegistries{s: "invalid condition: both location and prefix are unset"} - } - reg.Prefix = reg.Location - } else { - reg.Prefix, err = parseLocation(reg.Prefix) - if err != nil { - return err - } - // FIXME: allow config authors to always use Prefix. 
- // https://github.com/containers/image/pull/1191#discussion_r610622495 - if !strings.HasPrefix(reg.Prefix, "*.") && reg.Location == "" { - return &InvalidRegistries{s: "invalid condition: location is unset and prefix is not in the format: *.example.com"} - } - } - - // validate the mirror usage settings does not apply to primary registry - if reg.PullFromMirror != "" { - return fmt.Errorf("pull-from-mirror must not be set for a non-mirror registry %q", reg.Prefix) - } - // make sure mirrors are valid - for j := range reg.Mirrors { - mir := ®.Mirrors[j] - mir.Location, err = parseLocation(mir.Location) - if err != nil { - return err - } - - //FIXME: unqualifiedSearchRegistries now also accepts empty values - //and shouldn't - // https://github.com/containers/image/pull/1191#discussion_r610623216 - if mir.Location == "" { - return &InvalidRegistries{s: "invalid condition: mirror location is unset"} - } - - if reg.MirrorByDigestOnly && mir.PullFromMirror != "" { - return &InvalidRegistries{s: fmt.Sprintf("cannot set mirror usage mirror-by-digest-only for the registry (%q) and pull-from-mirror for per-mirror (%q) at the same time", reg.Prefix, mir.Location)} - } - if mir.PullFromMirror != "" && mir.PullFromMirror != MirrorAll && - mir.PullFromMirror != MirrorByDigestOnly && mir.PullFromMirror != MirrorByTagOnly { - return &InvalidRegistries{s: fmt.Sprintf("unsupported pull-from-mirror value %q for mirror %q", mir.PullFromMirror, mir.Location)} - } - } - if reg.Location == "" { - regMap[reg.Prefix] = append(regMap[reg.Prefix], reg) - } else { - regMap[reg.Location] = append(regMap[reg.Location], reg) - } - } - - // Given a registry can be mentioned multiple times (e.g., to have - // multiple prefixes backed by different mirrors), we need to make sure - // there are no conflicts among them. - // - // Note: we need to iterate over the registries array to ensure a - // deterministic behavior which is not guaranteed by maps. - for _, reg := range config.Registries { - var others []*Registry - var ok bool - if reg.Location == "" { - others, ok = regMap[reg.Prefix] - } else { - others, ok = regMap[reg.Location] - } - if !ok { - return fmt.Errorf("Internal error in V2RegistriesConf.PostProcess: entry in regMap is missing") - } - for _, other := range others { - if reg.Insecure != other.Insecure { - msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'insecure' setting", reg.Location) - return &InvalidRegistries{s: msg} - } - - if reg.Blocked != other.Blocked { - msg := fmt.Sprintf("registry '%s' is defined multiple times with conflicting 'blocked' setting", reg.Location) - return &InvalidRegistries{s: msg} - } - } - } - - for i := range config.UnqualifiedSearchRegistries { - registry, err := parseLocation(config.UnqualifiedSearchRegistries[i]) - if err != nil { - return err - } - if !anchoredDomainRegexp.MatchString(registry) { - return &InvalidRegistries{fmt.Sprintf("Invalid unqualified-search-registries entry %#v", registry)} - } - config.UnqualifiedSearchRegistries[i] = registry - } - - // Registries are ordered and the first longest prefix always wins, - // rendering later items with the same prefix non-existent. We cannot error - // out anymore as this might break existing users, so let's just ignore them - // to guarantee that the same prefix exists only once. - // - // As a side effect of parsedConfig.updateWithConfigurationFrom, the Registries slice - // is always sorted. To be consistent in situations where it is not called (no drop-ins), - // sort it here as well. 
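-	// (For example, if two [[registry]] entries both use the prefix
-	// "registry.example.com" (an assumed duplicate), only the first entry
-	// is kept.)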
- prefixes := []string{} - uniqueRegistries := make(map[string]Registry) - for i := range config.Registries { - // TODO: should we warn if we see the same prefix being used multiple times? - prefix := config.Registries[i].Prefix - if _, exists := uniqueRegistries[prefix]; !exists { - uniqueRegistries[prefix] = config.Registries[i] - prefixes = append(prefixes, prefix) - } - } - sort.Strings(prefixes) - config.Registries = []Registry{} - for _, prefix := range prefixes { - config.Registries = append(config.Registries, uniqueRegistries[prefix]) - } - - return nil -} - -// ConfigPath returns the path to the system-wide registry configuration file. -// Deprecated: This API implies configuration is read from files, and that there is only one. -// Please use ConfigurationSourceDescription to obtain a string usable for error messages. -func ConfigPath(ctx *types.SystemContext) string { - return newConfigWrapper(ctx).configPath -} - -// ConfigDirPath returns the path to the directory for drop-in -// registry configuration files. -// Deprecated: This API implies configuration is read from directories, and that there is only one. -// Please use ConfigurationSourceDescription to obtain a string usable for error messages. -func ConfigDirPath(ctx *types.SystemContext) string { - configWrapper := newConfigWrapper(ctx) - if configWrapper.userConfigDirPath != "" { - return configWrapper.userConfigDirPath - } - return configWrapper.configDirPath -} - -// configWrapper is used to store the paths from ConfigPath and ConfigDirPath -// and acts as a key to the internal cache. -type configWrapper struct { - // path to the registries.conf file - configPath string - // path to system-wide registries.conf.d directory, or "" if not used - configDirPath string - // path to user specified registries.conf.d directory, or "" if not used - userConfigDirPath string -} - -// newConfigWrapper returns a configWrapper for the specified SystemContext. -func newConfigWrapper(ctx *types.SystemContext) configWrapper { - return newConfigWrapperWithHomeDir(ctx, homedir.Get()) -} - -// newConfigWrapperWithHomeDir is an internal implementation detail of newConfigWrapper, -// it exists only to allow testing it with an artificial home directory. 
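-// (Illustrative resolution order, assuming no RootForImplicitAbsolutePaths:
-// an explicit ctx.SystemRegistriesConfPath wins; otherwise
-// $HOME/.config/containers/registries.conf if it exists; otherwise the
-// built-in system path such as /etc/containers/registries.conf.)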
-func newConfigWrapperWithHomeDir(ctx *types.SystemContext, homeDir string) configWrapper {
-	var wrapper configWrapper
-	userRegistriesFilePath := filepath.Join(homeDir, userRegistriesFile)
-	userRegistriesDirPath := filepath.Join(homeDir, userRegistriesDir)
-
-	// decide configPath using per-user path or system file
-	if ctx != nil && ctx.SystemRegistriesConfPath != "" {
-		wrapper.configPath = ctx.SystemRegistriesConfPath
-	} else if err := fileutils.Exists(userRegistriesFilePath); err == nil {
-		// per-user registries.conf exists, not reading system dir
-		// return config dirs from ctx or per-user one
-		wrapper.configPath = userRegistriesFilePath
-		if ctx != nil && ctx.SystemRegistriesConfDirPath != "" {
-			wrapper.configDirPath = ctx.SystemRegistriesConfDirPath
-		} else {
-			wrapper.userConfigDirPath = userRegistriesDirPath
-		}
-
-		return wrapper
-	} else if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" {
-		wrapper.configPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfPath)
-	} else {
-		wrapper.configPath = systemRegistriesConfPath
-	}
-
-	// potentially use both system and per-user dirs if not using per-user config file
-	if ctx != nil && ctx.SystemRegistriesConfDirPath != "" {
-		// dir explicitly chosen: use only that one
-		wrapper.configDirPath = ctx.SystemRegistriesConfDirPath
-	} else if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" {
-		wrapper.configDirPath = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesConfDirPath)
-		wrapper.userConfigDirPath = userRegistriesDirPath
-	} else {
-		wrapper.configDirPath = systemRegistriesConfDirPath
-		wrapper.userConfigDirPath = userRegistriesDirPath
-	}
-
-	return wrapper
-}
-
-// ConfigurationSourceDescription returns a string containing the paths of registries.conf and registries.conf.d
-func ConfigurationSourceDescription(ctx *types.SystemContext) string {
-	wrapper := newConfigWrapper(ctx)
-	configSources := []string{wrapper.configPath}
-	if wrapper.configDirPath != "" {
-		configSources = append(configSources, wrapper.configDirPath)
-	}
-	if wrapper.userConfigDirPath != "" {
-		configSources = append(configSources, wrapper.userConfigDirPath)
-	}
-	return strings.Join(configSources, ", ")
-}
-
-// configMutex is used to synchronize concurrent accesses to configCache.
-var configMutex = sync.Mutex{}
-
-// configCache caches already loaded configs with config paths as keys and is
-// used to avoid redundantly parsing configs. Concurrent accesses to the cache
-// are synchronized via configMutex.
-var configCache = make(map[configWrapper]*parsedConfig)
-
-// InvalidateCache invalidates the registry cache. This function is meant to be
-// used for long-running processes that need to reload potential changes made to
-// the cached registry config files.
-func InvalidateCache() {
-	configMutex.Lock()
-	defer configMutex.Unlock()
-	configCache = make(map[configWrapper]*parsedConfig)
-}
-
-// getConfig returns the config object corresponding to ctx, loading it if it is not yet cached.
-func getConfig(ctx *types.SystemContext) (*parsedConfig, error) {
-	wrapper := newConfigWrapper(ctx)
-	configMutex.Lock()
-	if config, inCache := configCache[wrapper]; inCache {
-		configMutex.Unlock()
-		return config, nil
-	}
-	configMutex.Unlock()
-
-	return tryUpdatingCache(ctx, wrapper)
-}
-
-// dropInConfigs returns a slice of drop-in-configs from the registries.conf.d
-// directory.
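-// (For example, an assumed directory /etc/containers/registries.conf.d
-// containing 000-shortnames.conf and 001-mirrors.conf yields both paths, in
-// lexical walk order; files without a .conf suffix are skipped.)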
-func dropInConfigs(wrapper configWrapper) ([]string, error) { - var ( - configs []string - dirPaths []string - ) - if wrapper.configDirPath != "" { - dirPaths = append(dirPaths, wrapper.configDirPath) - } - if wrapper.userConfigDirPath != "" { - dirPaths = append(dirPaths, wrapper.userConfigDirPath) - } - for _, dirPath := range dirPaths { - err := filepath.WalkDir(dirPath, - // WalkFunc to read additional configs - func(path string, d fs.DirEntry, err error) error { - switch { - case err != nil: - // return error (could be a permission problem) - return err - case d == nil: - // this should only happen when err != nil but let's be sure - return nil - case d.IsDir(): - if path != dirPath { - // make sure to not recurse into sub-directories - return filepath.SkipDir - } - // ignore directories - return nil - default: - // only add *.conf files - if strings.HasSuffix(path, ".conf") { - configs = append(configs, path) - } - return nil - } - }, - ) - - if err != nil && !os.IsNotExist(err) { - // Ignore IsNotExist errors: most systems won't have a registries.conf.d - // directory. - return nil, fmt.Errorf("reading registries.conf.d: %w", err) - } - } - - return configs, nil -} - -// TryUpdatingCache loads the configuration from the provided `SystemContext` -// without using the internal cache. On success, the loaded configuration will -// be added into the internal registry cache. -// It returns the resulting configuration; this is DEPRECATED and may not correctly -// reflect any future data handled by this package. -func TryUpdatingCache(ctx *types.SystemContext) (*V2RegistriesConf, error) { - config, err := tryUpdatingCache(ctx, newConfigWrapper(ctx)) - if err != nil { - return nil, err - } - return &config.partialV2, err -} - -// tryUpdatingCache implements TryUpdatingCache with an additional configWrapper -// argument to avoid redundantly calculating the config paths. -func tryUpdatingCache(ctx *types.SystemContext, wrapper configWrapper) (*parsedConfig, error) { - configMutex.Lock() - defer configMutex.Unlock() - - // load the config - config, err := loadConfigFile(wrapper.configPath, false) - if err != nil { - // Continue with an empty []Registry if we use the default config, which - // implies that the config path of the SystemContext isn't set. - // - // Note: if ctx.SystemRegistriesConfPath points to the default config, - // we will still return an error. - if os.IsNotExist(err) && (ctx == nil || ctx.SystemRegistriesConfPath == "") { - config = &parsedConfig{} - config.partialV2 = V2RegistriesConf{Registries: []Registry{}} - config.aliasCache, err = newShortNameAliasCache("", &shortNameAliasConf{}) - if err != nil { - return nil, err // Should never happen - } - } else { - return nil, fmt.Errorf("loading registries configuration %q: %w", wrapper.configPath, err) - } - } - - // Load the configs from the conf directory path. - dinConfigs, err := dropInConfigs(wrapper) - if err != nil { - return nil, err - } - for _, path := range dinConfigs { - // Enforce v2 format for drop-in-configs. 
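-		// (Each drop-in may override or extend what was loaded so far; see
-		// parsedConfig.updateWithConfigurationFrom below for the merge rules.)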
-		dropIn, err := loadConfigFile(path, true)
-		if err != nil {
-			if errors.Is(err, fs.ErrNotExist) {
-				// The file must have been removed between the directory listing
-				// and the open call; ignore that as it is an expected race.
-				continue
-			}
-			return nil, fmt.Errorf("loading drop-in registries configuration %q: %w", path, err)
-		}
-		config.updateWithConfigurationFrom(dropIn)
-	}
-
-	if config.shortNameMode == types.ShortNameModeInvalid {
-		config.shortNameMode = defaultShortNameMode
-	}
-
-	if len(config.partialV2.CredentialHelpers) == 0 {
-		config.partialV2.CredentialHelpers = []string{AuthenticationFileHelper}
-	}
-
-	// populate the cache
-	configCache[wrapper] = config
-	return config, nil
-}
-
-// GetRegistries has been deprecated. Use FindRegistry instead.
-//
-// GetRegistries loads and returns the registries specified in the config.
-// Note the parsed content of registry config files is cached. For reloading,
-// use `InvalidateCache` and re-call `GetRegistries`.
-func GetRegistries(ctx *types.SystemContext) ([]Registry, error) {
-	config, err := getConfig(ctx)
-	if err != nil {
-		return nil, err
-	}
-	return config.partialV2.Registries, nil
-}
-
-// UnqualifiedSearchRegistries returns a list of host[:port] entries to try
-// for unqualified image search, in the returned order.
-func UnqualifiedSearchRegistries(ctx *types.SystemContext) ([]string, error) {
-	registries, _, err := UnqualifiedSearchRegistriesWithOrigin(ctx)
-	return registries, err
-}
-
-// UnqualifiedSearchRegistriesWithOrigin returns a list of host[:port] entries
-// to try for unqualified image search, in the returned order. It also returns
-// a human-readable description of where these entries are specified (e.g., a
-// registries.conf file).
-func UnqualifiedSearchRegistriesWithOrigin(ctx *types.SystemContext) ([]string, string, error) {
-	config, err := getConfig(ctx)
-	if err != nil {
-		return nil, "", err
-	}
-	return config.partialV2.UnqualifiedSearchRegistries, config.unqualifiedSearchRegistriesOrigin, nil
-}
-
-// parseShortNameMode translates the string into a well-typed
-// types.ShortNameMode.
-func parseShortNameMode(mode string) (types.ShortNameMode, error) {
-	switch mode {
-	case "disabled":
-		return types.ShortNameModeDisabled, nil
-	case "enforcing":
-		return types.ShortNameModeEnforcing, nil
-	case "permissive":
-		return types.ShortNameModePermissive, nil
-	default:
-		return types.ShortNameModeInvalid, fmt.Errorf("invalid short-name mode: %q", mode)
-	}
-}
-
-// GetShortNameMode returns the configured types.ShortNameMode.
-func GetShortNameMode(ctx *types.SystemContext) (types.ShortNameMode, error) {
-	if ctx != nil && ctx.ShortNameMode != nil {
-		return *ctx.ShortNameMode, nil
-	}
-	config, err := getConfig(ctx)
-	if err != nil {
-		return -1, err
-	}
-	return config.shortNameMode, err
-}
-
-// CredentialHelpers returns the global top-level credential helpers.
-func CredentialHelpers(sys *types.SystemContext) ([]string, error) {
-	config, err := getConfig(sys)
-	if err != nil {
-		return nil, err
-	}
-	return config.partialV2.CredentialHelpers, nil
-}
-
-// AdditionalLayerStoreAuthHelper returns the helper for passing registry
-// credentials to the Additional Layer Store.
-func AdditionalLayerStoreAuthHelper(sys *types.SystemContext) (string, error) {
-	config, err := getConfig(sys)
-	if err != nil {
-		return "", err
-	}
-	return config.partialV2.AdditionalLayerStoreAuthHelper, nil
-}
-
-// refMatchingSubdomainPrefix returns the length of ref
-// iff ref, which is a registry, repository namespace, repository or image reference (as formatted by
-// reference.Domain(), reference.Named.Name() or reference.Reference.String()
-// — note that this requires the name to start with an explicit hostname!),
-// matches a Registry.Prefix value containing wildcarded subdomains in the
-// format: *.example.com. Wildcards are only accepted at the beginning, so
-// other formats like example.*.com will not work. Wildcarded prefixes also
-// cannot contain port numbers or namespaces in them.
-func refMatchingSubdomainPrefix(ref, prefix string) int {
-	index := strings.Index(ref, prefix[1:])
-	if index == -1 {
-		return -1
-	}
-	if strings.Contains(ref[:index], "/") {
-		return -1
-	}
-	index += len(prefix[1:])
-	if index == len(ref) {
-		return index
-	}
-	switch ref[index] {
-	case ':', '/', '@':
-		return index
-	default:
-		return -1
-	}
-}
-
-// refMatchingPrefix returns the length of the prefix iff ref,
-// which is a registry, repository namespace, repository or image reference (as formatted by
-// reference.Domain(), reference.Named.Name() or reference.Reference.String()
-// — note that this requires the name to start with an explicit hostname!),
-// matches a Registry.Prefix value.
-// (This is split from the caller primarily to make testing easier.)
-func refMatchingPrefix(ref, prefix string) int {
-	switch {
-	case strings.HasPrefix(prefix, "*."):
-		return refMatchingSubdomainPrefix(ref, prefix)
-	case len(ref) < len(prefix):
-		return -1
-	case len(ref) == len(prefix):
-		if ref == prefix {
-			return len(prefix)
-		}
-		return -1
-	case len(ref) > len(prefix):
-		if !strings.HasPrefix(ref, prefix) {
-			return -1
-		}
-		c := ref[len(prefix)]
-		// This allows "example.com:5000" to match "example.com",
-		// which is unintended; that will get fixed eventually, DON'T RELY
-		// ON THE CURRENT BEHAVIOR.
-		if c == ':' || c == '/' || c == '@' {
-			return len(prefix)
-		}
-		return -1
-	default:
-		panic("Internal error: impossible comparison outcome")
-	}
-}
-
-// FindRegistry returns the Registry with the longest prefix for ref,
-// which is a registry, repository namespace, repository or image reference (as formatted by
-// reference.Domain(), reference.Named.Name() or reference.Reference.String()
-// — note that this requires the name to start with an explicit hostname!).
-// If no Registry prefixes the image, nil is returned.
-func FindRegistry(ctx *types.SystemContext, ref string) (*Registry, error) {
-	config, err := getConfig(ctx)
-	if err != nil {
-		return nil, err
-	}
-
-	return findRegistryWithParsedConfig(config, ref)
-}
-
-// findRegistryWithParsedConfig implements `FindRegistry` with a pre-loaded
-// parsedConfig.
-func findRegistryWithParsedConfig(config *parsedConfig, ref string) (*Registry, error) {
-	reg := Registry{}
-	prefixLen := 0
-	for _, r := range config.partialV2.Registries {
-		if refMatchingPrefix(ref, r.Prefix) != -1 {
-			length := len(r.Prefix)
-			if length > prefixLen {
-				reg = r
-				prefixLen = length
-			}
-		}
-	}
-	if prefixLen != 0 {
-		return &reg, nil
-	}
-	return nil, nil
-}
-
-// loadConfigFile loads and unmarshals a single config file.
-// Use forceV2 if the config must be in the v2 format.
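-// (For example, drop-in files under registries.conf.d are loaded with
-// forceV2 == true, so a v1-format [registries.search] table in them is
-// rejected rather than converted.)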
-func loadConfigFile(path string, forceV2 bool) (*parsedConfig, error) { - logrus.Debugf("Loading registries configuration %q", path) - - // tomlConfig allows us to unmarshal either V1 or V2 simultaneously. - type tomlConfig struct { - V2RegistriesConf - V1RegistriesConf // for backwards compatibility with sysregistries v1 - } - - // Load the tomlConfig. Note that `DecodeFile` will overwrite set fields. - var combinedTOML tomlConfig - meta, err := toml.DecodeFile(path, &combinedTOML) - if err != nil { - return nil, err - } - if keys := meta.Undecoded(); len(keys) > 0 { - logrus.Debugf("Failed to decode keys %q from %q", keys, path) - } - - if combinedTOML.V1RegistriesConf.hasSetField() { - // Enforce the v2 format if requested. - if forceV2 { - return nil, &InvalidRegistries{s: "registry must be in v2 format but is in v1"} - } - - // Convert a v1 config into a v2 config. - if combinedTOML.V2RegistriesConf.hasSetField() { - return nil, &InvalidRegistries{s: fmt.Sprintf("mixing sysregistry v1/v2 is not supported: %#v", combinedTOML)} - } - converted, err := combinedTOML.V1RegistriesConf.ConvertToV2() - if err != nil { - return nil, err - } - combinedTOML.V1RegistriesConf = V1RegistriesConf{} - combinedTOML.V2RegistriesConf = *converted - } - - res := parsedConfig{partialV2: combinedTOML.V2RegistriesConf} - - // Post process registries, set the correct prefixes, sanity checks, etc. - if err := res.partialV2.postProcessRegistries(); err != nil { - return nil, err - } - - res.unqualifiedSearchRegistriesOrigin = path - - if len(res.partialV2.ShortNameMode) > 0 { - mode, err := parseShortNameMode(res.partialV2.ShortNameMode) - if err != nil { - return nil, err - } - res.shortNameMode = mode - } else { - res.shortNameMode = types.ShortNameModeInvalid - } - - // Valid wildcarded prefixes must be in the format: *.example.com - // FIXME: Move to postProcessRegistries - // https://github.com/containers/image/pull/1191#discussion_r610623829 - for i := range res.partialV2.Registries { - prefix := res.partialV2.Registries[i].Prefix - if strings.HasPrefix(prefix, "*.") && strings.ContainsAny(prefix, "/@:") { - msg := fmt.Sprintf("Wildcarded prefix should be in the format: *.example.com. Current prefix %q is incorrectly formatted", prefix) - return nil, &InvalidRegistries{s: msg} - } - } - - // Parse and validate short-name aliases. - cache, err := newShortNameAliasCache(path, &res.partialV2.shortNameAliasConf) - if err != nil { - return nil, fmt.Errorf("validating short-name aliases: %w", err) - } - res.aliasCache = cache - // Clear conf.partialV2.shortNameAliasConf to make it available for garbage collection and - // reduce memory consumption. We're consulting aliasCache for lookups. - res.partialV2.shortNameAliasConf = shortNameAliasConf{} - - return &res, nil -} - -// updateWithConfigurationFrom updates c with configuration from updates. -// -// Fields present in updates will typically replace already set fields in c. -// The [[registry]] and alias tables are merged. -func (c *parsedConfig) updateWithConfigurationFrom(updates *parsedConfig) { - // == Merge Registries: - registryMap := make(map[string]Registry) - for i := range c.partialV2.Registries { - registryMap[c.partialV2.Registries[i].Prefix] = c.partialV2.Registries[i] - } - // Merge the freshly loaded registries. 
-	for i := range updates.partialV2.Registries {
-		registryMap[updates.partialV2.Registries[i].Prefix] = updates.partialV2.Registries[i]
-	}
-
-	// Go maps have a non-deterministic order when iterating the keys, so
-	// we sort the keys to enforce some order in the Registries slice.
-	// Some consumers of c/image (e.g., CRI-O) log the configuration
-	// and a non-deterministic order could easily cause confusion.
-	prefixes := slices.Sorted(maps.Keys(registryMap))
-
-	c.partialV2.Registries = []Registry{}
-	for _, prefix := range prefixes {
-		c.partialV2.Registries = append(c.partialV2.Registries, registryMap[prefix])
-	}
-
-	// == Merge UnqualifiedSearchRegistries:
-	// This depends on a subtlety of the behavior of the TOML decoder, where a missing array field
-	// is not modified while unmarshaling (in our case it remains nil), while an [] is unmarshaled
-	// as a non-nil []string{}.
-	if updates.partialV2.UnqualifiedSearchRegistries != nil {
-		c.partialV2.UnqualifiedSearchRegistries = updates.partialV2.UnqualifiedSearchRegistries
-		c.unqualifiedSearchRegistriesOrigin = updates.unqualifiedSearchRegistriesOrigin
-	}
-
-	// == Merge credential helpers:
-	if updates.partialV2.CredentialHelpers != nil {
-		c.partialV2.CredentialHelpers = updates.partialV2.CredentialHelpers
-	}
-
-	// == Merge shortNameMode:
-	// We don’t maintain c.partialV2.ShortNameMode.
-	if updates.shortNameMode != types.ShortNameModeInvalid {
-		c.shortNameMode = updates.shortNameMode
-	}
-
-	// == Merge AdditionalLayerStoreAuthHelper:
-	if updates.partialV2.AdditionalLayerStoreAuthHelper != "" {
-		c.partialV2.AdditionalLayerStoreAuthHelper = updates.partialV2.AdditionalLayerStoreAuthHelper
-	}
-
-	// == Merge aliasCache:
-	// We don’t maintain (in fact we actively clear) c.partialV2.shortNameAliasConf.
diff --git a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go b/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
deleted file mode 100644
index 4e0ee57e91..0000000000
--- a/vendor/github.com/containers/image/v5/pkg/tlsclientconfig/tlsclientconfig.go
+++ /dev/null
@@ -1,101 +0,0 @@
-package tlsclientconfig
-
-import (
-	"crypto/tls"
-	"crypto/x509"
-	"errors"
-	"fmt"
-	"net"
-	"net/http"
-	"os"
-	"path/filepath"
-	"slices"
-	"strings"
-	"time"
-
-	"github.com/sirupsen/logrus"
-)
-
-// SetupCertificates opens all .crt, .cert, and .key files in dir and appends / loads certs and key pairs as appropriate to tlsc
-func SetupCertificates(dir string, tlsc *tls.Config) error {
-	logrus.Debugf("Looking for TLS certificates and private keys in %s", dir)
-	fs, err := os.ReadDir(dir)
-	if err != nil {
-		if os.IsNotExist(err) {
-			return nil
-		}
-		if os.IsPermission(err) {
-			logrus.Debugf("Skipping scan of %s due to permission error: %v", dir, err)
-			return nil
-		}
-		return err
-	}
-
-	for _, f := range fs {
-		fullPath := filepath.Join(dir, f.Name())
-		if strings.HasSuffix(f.Name(), ".crt") {
-			logrus.Debugf(" crt: %s", fullPath)
-			data, err := os.ReadFile(fullPath)
-			if err != nil {
-				if errors.Is(err, os.ErrNotExist) {
-					// file must have been removed between the directory listing
-					// and the open call, ignore that as it is an expected race
-					continue
-				}
-				return err
-			}
-			if tlsc.RootCAs == nil {
-				systemPool, err := x509.SystemCertPool()
-				if err != nil {
-					return fmt.Errorf("unable to get system cert pool: %w", err)
-				}
-				tlsc.RootCAs = systemPool
-			}
-			tlsc.RootCAs.AppendCertsFromPEM(data)
-		}
-		if base, ok := strings.CutSuffix(f.Name(), ".cert"); ok {
-			certName := f.Name()
-			keyName := base + ".key"
-			logrus.Debugf(" cert: %s", fullPath)
-			if !hasFile(fs, keyName) {
-				return fmt.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName)
-			}
-			cert, err := tls.LoadX509KeyPair(filepath.Join(dir, certName), filepath.Join(dir, keyName))
-			if err != nil {
-				return err
-			}
-			tlsc.Certificates = append(slices.Clone(tlsc.Certificates), cert)
-		}
-		if base, ok := strings.CutSuffix(f.Name(), ".key"); ok {
-			keyName := f.Name()
-			certName := base + ".cert"
-			logrus.Debugf(" key: %s", fullPath)
-			if !hasFile(fs, certName) {
-				return fmt.Errorf("missing client certificate %s for key %s", certName, keyName)
-			}
-		}
-	}
-	return nil
-}
-
-func hasFile(files []os.DirEntry, name string) bool {
-	return slices.ContainsFunc(files, func(f os.DirEntry) bool {
-		return f.Name() == name
-	})
-}
-
-// NewTransport creates a default transport
-func NewTransport() *http.Transport {
-	direct := &net.Dialer{
-		Timeout:   30 * time.Second,
-		KeepAlive: 30 * time.Second,
-	}
-	tr := &http.Transport{
-		Proxy:               http.ProxyFromEnvironment,
-		DialContext:         direct.DialContext,
-		TLSHandshakeTimeout: 10 * time.Second,
-		IdleConnTimeout:     90 * time.Second,
-		MaxIdleConns:        100,
-	}
-	return tr
-}
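An editorial aside on how callers typically combine the two helpers above. This is a minimal sketch assuming the post-migration import path go.podman.io/image/v5/pkg/tlsclientconfig (the module this PR switches to) and a hypothetical certificate directory:

```go
package main

import (
	"crypto/tls"
	"log"
	"net/http"

	"go.podman.io/image/v5/pkg/tlsclientconfig"
)

func main() {
	tlsc := &tls.Config{}
	// Loads *.crt CA certs and *.cert/*.key client pairs from the directory.
	// The directory path here is an example, not a required location.
	if err := tlsclientconfig.SetupCertificates("/etc/docker/certs.d/registry.example.com", tlsc); err != nil {
		log.Fatal(err)
	}

	tr := tlsclientconfig.NewTransport()
	tr.TLSClientConfig = tlsc

	client := &http.Client{Transport: tr}
	_ = client // use for registry requests
}
```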
diff --git a/vendor/github.com/containers/image/v5/transports/stub.go b/vendor/github.com/containers/image/v5/transports/stub.go
deleted file mode 100644
index 2c186a90cc..0000000000
--- a/vendor/github.com/containers/image/v5/transports/stub.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package transports
-
-import (
-	"fmt"
-
-	"github.com/containers/image/v5/types"
-)
-
-// stubTransport is an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”.
-type stubTransport string
-
-// NewStubTransport returns an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”.
-func NewStubTransport(name string) types.ImageTransport {
-	return stubTransport(name)
-}
-
-// Name returns the name of the transport, which must be unique among other transports.
-func (s stubTransport) Name() string {
-	return string(s)
-}
-
-// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
-func (s stubTransport) ParseReference(reference string) (types.ImageReference, error) {
-	return nil, fmt.Errorf(`The transport "%s:" is not supported in this build`, string(s))
-}
-
-// ValidatePolicyConfigurationScope checks that scope is a valid name for signature.PolicyTransportScopes keys
-// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
-// It is acceptable to allow an invalid value which will never be matched; it can "only" cause user confusion.
-// The scope passed to this function will not be "", that value is always allowed.
-func (s stubTransport) ValidatePolicyConfigurationScope(scope string) error {
-	// Allowing any reference in here allows tools with some transports stubbed out to still
-	// use signature verification policies which refer to these stubbed-out transports.
-	// See also the treatment of unknown transports in policyTransportScopesWithTransport.UnmarshalJSON.
-	return nil
-}
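To illustrate the stub behavior, here is a hypothetical standalone program, again assuming the post-migration go.podman.io/image/v5/transports import path: lookups by name succeed, but parsing a reference fails with the "not supported in this build" error.

```go
package main

import (
	"fmt"

	"go.podman.io/image/v5/transports"
)

func main() {
	// A build without the ostree transport might register a stub instead.
	transports.Register(transports.NewStubTransport("ostree"))

	t := transports.Get("ostree") // lookup by name still works
	fmt.Println("transport name:", t.Name())

	// Any actual reference is rejected by the stub.
	if _, err := t.ParseReference("/some/ref"); err != nil {
		fmt.Println("expected failure:", err)
	}
}
```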
diff --git a/vendor/github.com/containers/image/v5/transports/transports.go b/vendor/github.com/containers/image/v5/transports/transports.go
deleted file mode 100644
index 4c9c0889c2..0000000000
--- a/vendor/github.com/containers/image/v5/transports/transports.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package transports
-
-import (
-	"fmt"
-	"sort"
-	"sync"
-
-	"github.com/containers/image/v5/internal/set"
-	"github.com/containers/image/v5/types"
-)
-
-// knownTransports is a registry of known ImageTransport instances.
-type knownTransports struct {
-	transports map[string]types.ImageTransport
-	mu         sync.Mutex
-}
-
-func (kt *knownTransports) Get(k string) types.ImageTransport {
-	kt.mu.Lock()
-	t := kt.transports[k]
-	kt.mu.Unlock()
-	return t
-}
-
-func (kt *knownTransports) Remove(k string) {
-	kt.mu.Lock()
-	delete(kt.transports, k)
-	kt.mu.Unlock()
-}
-
-func (kt *knownTransports) Add(t types.ImageTransport) {
-	kt.mu.Lock()
-	defer kt.mu.Unlock()
-	name := t.Name()
-	if t := kt.transports[name]; t != nil {
-		panic(fmt.Sprintf("Duplicate image transport name %s", name))
-	}
-	kt.transports[name] = t
-}
-
-var kt *knownTransports
-
-func init() {
-	kt = &knownTransports{
-		transports: make(map[string]types.ImageTransport),
-	}
-}
-
-// Get returns the transport specified by name or nil when unavailable.
-func Get(name string) types.ImageTransport {
-	return kt.Get(name)
-}
-
-// Delete deletes a transport from the registered transports.
-func Delete(name string) {
-	kt.Remove(name)
-}
-
-// Register registers a transport.
-func Register(t types.ImageTransport) {
-	kt.Add(t)
-}
-
-// ImageName converts a types.ImageReference into a URL-like image name, which MUST be such that
-// ParseImageName(ImageName(reference)) returns an equivalent reference.
-//
-// This is the generally recommended way to refer to images in the UI.
-//
-// NOTE: The returned string is not promised to be equal to the original input to ParseImageName;
-// e.g. default attribute values omitted by the user may be filled in the return value, or vice versa.
-func ImageName(ref types.ImageReference) string {
-	return ref.Transport().Name() + ":" + ref.StringWithinTransport()
-}
-
-var deprecatedTransports = set.NewWithValues("atomic", "ostree")
-
-// ListNames returns a list of non-deprecated transport names.
-// Deprecated transports can be used, but are not presented to users.
-func ListNames() []string {
-	kt.mu.Lock()
-	defer kt.mu.Unlock()
-	var names []string
-	for _, transport := range kt.transports {
-		if !deprecatedTransports.Contains(transport.Name()) {
-			names = append(names, transport.Name())
-		}
-	}
-	sort.Strings(names)
-	return names
-}
diff --git a/vendor/github.com/containers/image/v5/types/types.go b/vendor/github.com/containers/image/v5/types/types.go
deleted file mode 100644
index a93951780b..0000000000
--- a/vendor/github.com/containers/image/v5/types/types.go
+++ /dev/null
@@ -1,731 +0,0 @@
-package types
-
-import (
-	"context"
-	"io"
-	"net/url"
-	"time"
-
-	"github.com/containers/image/v5/docker/reference"
-	compression "github.com/containers/image/v5/pkg/compression/types"
-	digest "github.com/opencontainers/go-digest"
-	v1 "github.com/opencontainers/image-spec/specs-go/v1"
-)
-
-// ImageTransport is a top-level namespace for ways to store/load an image.
-// It should generally correspond to ImageSource/ImageDestination implementations.
-//
-// Note that ImageTransport is based on "ways the users refer to image storage", not necessarily on the underlying physical transport.
-// For example, all Docker References would be used within a single "docker" transport, regardless of whether the images are pulled over HTTP or HTTPS -// (or, even, IPv4 or IPv6). -// -// OTOH all images using the same transport should (apart from versions of the image format), be interoperable. -// For example, several different ImageTransport implementations may be based on local filesystem paths, -// but using completely different formats for the contents of that path (a single tar file, a directory containing tarballs, a fully expanded container filesystem, ...) -// -// See also transports.KnownTransports. -type ImageTransport interface { - // Name returns the name of the transport, which must be unique among other transports. - Name() string - // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference. - ParseReference(reference string) (ImageReference, error) - // ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys - // (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). - // It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. - // scope passed to this function will not be "", that value is always allowed. - ValidatePolicyConfigurationScope(scope string) error -} - -// ImageReference is an abstracted way to refer to an image location, namespaced within an ImageTransport. -// -// The object should preferably be immutable after creation, with any parsing/state-dependent resolving happening -// within an ImageTransport.ParseReference() or equivalent API creating the reference object. -// That's also why the various identification/formatting methods of this type do not support returning errors. -// -// WARNING: While this design freezes the content of the reference within this process, it can not freeze the outside -// world: paths may be replaced by symlinks elsewhere, HTTP APIs may start returning different results, and so on. -type ImageReference interface { - Transport() ImageTransport - // StringWithinTransport returns a string representation of the reference, which MUST be such that - // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference. - // NOTE: The returned string is not promised to be equal to the original input to ParseReference; - // e.g. default attribute values omitted by the user may be filled in the return value, or vice versa. - // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix; - // instead, see transports.ImageName(). - StringWithinTransport() string - - // DockerReference returns a Docker reference associated with this reference - // (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent, - // not e.g. after redirect or alias processing), or nil if unknown/not applicable. - DockerReference() reference.Named - - // PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup. - // This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases; - // The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical - // (i.e. 
various references with exactly the same semantics should return the same configuration identity)
-	// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
-	// not required/guaranteed that it will be a valid input to Transport().ParseReference().
-	// Returns "" if configuration identities for these references are not supported.
-	PolicyConfigurationIdentity() string
-
-	// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
-	// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
-	// in order, terminating on first match, and an implicit "" is always checked at the end.
-	// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
-	// and each following element to be a prefix of the element preceding it.
-	PolicyConfigurationNamespaces() []string
-
-	// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport.
-	// The caller must call .Close() on the returned ImageCloser.
-	// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
-	// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
-	// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details.
-	NewImage(ctx context.Context, sys *SystemContext) (ImageCloser, error)
-	// NewImageSource returns a types.ImageSource for this reference.
-	// The caller must call .Close() on the returned ImageSource.
-	NewImageSource(ctx context.Context, sys *SystemContext) (ImageSource, error)
-	// NewImageDestination returns a types.ImageDestination for this reference.
-	// The caller must call .Close() on the returned ImageDestination.
-	NewImageDestination(ctx context.Context, sys *SystemContext) (ImageDestination, error)
-
-	// DeleteImage deletes the named image from the registry, if supported.
-	DeleteImage(ctx context.Context, sys *SystemContext) error
-}
-
-// LayerCompression indicates if layers must be compressed, decompressed or preserved
-type LayerCompression int
-
-const (
-	// PreserveOriginal indicates the layer must be preserved, i.e.
-	// no compression or decompression.
-	PreserveOriginal LayerCompression = iota
-	// Decompress indicates the layer must be decompressed
-	Decompress
-	// Compress indicates the layer must be compressed
-	Compress
-)
-
-// LayerCrypto indicates if layers have been encrypted or decrypted or none
-type LayerCrypto int
-
-const (
-	// PreserveOriginalCrypto indicates the layer must be preserved, i.e.
-	// no encryption/decryption
-	PreserveOriginalCrypto LayerCrypto = iota
-	// Encrypt indicates the layer is encrypted
-	Encrypt
-	// Decrypt indicates the layer is decrypted
-	Decrypt
-)
-
-// BlobInfo collects known information about a blob (layer/config).
-// In some situations, some fields may be unknown, in others they may be mandatory; documenting an “unknown” value here does not override that.
-type BlobInfo struct {
-	Digest      digest.Digest // "" if unknown.
-	Size        int64         // -1 if unknown
-	URLs        []string
-	Annotations map[string]string
-	MediaType   string
-
-	// NOTE: The following fields contain desired _edits_ to blob infos.
-	// Conceptually they don't belong in the BlobInfo object at all;
-	// the edits should be provided specifically as parameters to the edit implementation.
- // We can’t remove the fields without breaking compatibility, but don’t - // add any more. - - // CompressionOperation is used in Image.UpdateLayerInfos to instruct - // whether the original layer's "compressed or not" should be preserved, - // possibly while changing the compression algorithm from one to another, - // or if it should be changed to compressed or decompressed. - // The field defaults to preserve the original layer's compressedness. - // TODO: To remove together with CryptoOperation in re-design to remove - // field out of BlobInfo. - CompressionOperation LayerCompression - // CompressionAlgorithm is used in Image.UpdateLayerInfos to set the correct - // MIME type for compressed layers (e.g., gzip or zstd). This field MUST be - // set when `CompressionOperation == Compress` and MAY be set when - // `CompressionOperation == PreserveOriginal` and the compression type is - // being changed for an already-compressed layer. - CompressionAlgorithm *compression.Algorithm - // CryptoOperation is used in Image.UpdateLayerInfos to instruct - // whether the original layer was encrypted/decrypted - // TODO: To remove together with CompressionOperation in re-design to - // remove field out of BlobInfo. - CryptoOperation LayerCrypto - // Before adding any fields to this struct, read the NOTE above. -} - -// BICTransportScope encapsulates transport-dependent representation of a “scope” where blobs are or are not present. -// BlobInfocache.RecordKnownLocations / BlobInfocache.CandidateLocations record data about blobs keyed by (scope, digest). -// The scope will typically be similar to an ImageReference, or a superset of it within which blobs are reusable. -// -// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different -// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility, -// at least by not failing hard when encountering unknown data. -type BICTransportScope struct { - Opaque string -} - -// BICLocationReference encapsulates transport-dependent representation of a blob location within a BICTransportScope. -// Each transport can store arbitrary data using BlobInfoCache.RecordKnownLocation, and ImageDestination.TryReusingBlob -// can look it up using BlobInfoCache.CandidateLocations. -// -// NOTE: The contents of this structure may be recorded in a persistent file, possibly shared across different -// tools which use different versions of the transport. Allow for reasonable backward/forward compatibility, -// at least by not failing hard when encountering unknown data. -type BICLocationReference struct { - Opaque string -} - -// BICReplacementCandidate is an item returned by BlobInfoCache.CandidateLocations. -type BICReplacementCandidate struct { - Digest digest.Digest - Location BICLocationReference -} - -// BlobInfoCache records data useful for reusing blobs, or substituting equivalent ones, to avoid unnecessary blob copies. -// -// It records two kinds of data: -// -// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs: -// One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest. 
-// This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression),
-// or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload).
-//
-// It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known
-// to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value).
-//
-// This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently
-// compress/decompress blobs for their own purposes.
-//
-// - Known blob locations, managed by individual transports:
-// The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob),
-// recording transport-specific information that allows the transport to reuse the blob in the future;
-// then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused.
-//
-// Each transport defines its own “scopes” within which blob reuse is possible (e.g., in the docker/distribution case, blobs
-// can be directly reused within a registry, or mounted across registries within a registry server.)
-//
-// None of the methods return an error indication: errors in reading from, or writing to, the cache should not be fatal;
-// users of the cache should just fall back to copying the blobs the usual way.
-//
-// The BlobInfoCache interface is deprecated. Consumers of this library should use one of the implementations provided by
-// subpackages of the library's "pkg/blobinfocache" package in preference to implementing the interface on their own.
-type BlobInfoCache interface {
-	// UncompressedDigest returns an uncompressed digest corresponding to anyDigest.
-	// May return anyDigest if it is known to be uncompressed.
-	// Returns "" if nothing is known about the digest (it may be compressed or uncompressed).
-	UncompressedDigest(anyDigest digest.Digest) digest.Digest
-	// RecordDigestUncompressedPair records that uncompressed is the uncompressed version of anyDigest.
-	// It’s allowed for anyDigest == uncompressed.
-	// WARNING: Only call this for LOCALLY VERIFIED data; don’t record a digest pair just because some remote author claims so (e.g.
-	// because a manifest/config pair exists); otherwise the cache could be poisoned and allow substituting unexpected blobs.
-	// (Eventually, the DiffIDs in image config could detect the substitution, but that may be too late, and not all image formats contain that data.)
-	RecordDigestUncompressedPair(anyDigest digest.Digest, uncompressed digest.Digest)
-
-	// RecordKnownLocation records that a blob with the specified digest exists within the specified (transport, scope) scope,
-	// and can be reused given the opaque location data.
-	RecordKnownLocation(transport ImageTransport, scope BICTransportScope, digest digest.Digest, location BICLocationReference)
-	// CandidateLocations returns a prioritized, limited, number of blobs and their locations that could possibly be reused
-	// within the specified (transport, scope) (if they still exist, which is not guaranteed).
- // - // If !canSubstitute, the returned candidates will match the submitted digest exactly; if canSubstitute, - // data from previous RecordDigestUncompressedPair calls is used to also look up variants of the blob which have the same - // uncompressed digest. - CandidateLocations(transport ImageTransport, scope BICTransportScope, digest digest.Digest, canSubstitute bool) []BICReplacementCandidate -} - -// ImageSource is a service, possibly remote (= slow), to download components of a single image or a named image set (manifest list). -// This is primarily useful for copying images around; for examining their properties, Image (below) -// is usually more useful. -// Each ImageSource should eventually be closed by calling Close(). -// -// WARNING: Various methods which return an object identified by digest generally do not -// validate that the returned data actually matches that digest; this is the caller’s responsibility. -// See the individual methods’ documentation for potentially more details. -type ImageSource interface { - // Reference returns the reference used to set up this source, _as specified by the user_ - // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. - Reference() ImageReference - // Close removes resources associated with an initialized ImageSource, if any. - Close() error - // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). - // It may use a remote (= slow) service. - // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); - // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). - // - // WARNING: This is a raw access to the data as provided by the source; if the reference contains a digest, or instanceDigest is set, - // callers must enforce the digest match themselves, typically by using image.UnparsedInstance to access the manifest instead - // of calling this directly. (Compare the generic warning applicable to all of the [ImageSource] interface.) - GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) - // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). - // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. - // May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location. - // - // WARNING: This is a raw access to the data as provided by the source; callers must validate the contents - // against the blob’s digest themselves. (Compare the generic warning applicable to all of the [ImageSource] interface.) - GetBlob(context.Context, BlobInfo, BlobInfoCache) (io.ReadCloser, int64, error) - // HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently. - HasThreadSafeGetBlob() bool - // GetSignatures returns the image's signatures. It may use a remote (= slow) service. - // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for - // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list - // (e.g. if the source never returns manifest lists). 
-	GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error)
-	// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
-	// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
-	// to read the image's layers.
-	// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve BlobInfos for
-	// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list
-	// (e.g. if the source never returns manifest lists).
-	// The Digest field is guaranteed to be provided; Size may be -1.
-	// WARNING: The list may contain duplicates, and they are semantically relevant.
-	LayerInfosForCopy(ctx context.Context, instanceDigest *digest.Digest) ([]BlobInfo, error)
-}
-
-// ImageDestination is a service, possibly remote (= slow), to store components of a single image.
-//
-// There is a specific required order for some of the calls:
-// TryReusingBlob/PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time)
-// PutSignatures, if called, MUST be called after PutManifest (signatures reference manifest contents)
-// Finally, Commit MUST be called if the caller wants the image, as formed by the components saved above, to persist.
-//
-// Each ImageDestination should eventually be closed by calling Close().
-type ImageDestination interface {
-	// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
-	// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
-	Reference() ImageReference
-	// Close removes resources associated with an initialized ImageDestination, if any.
-	Close() error
-
-	// SupportedManifestMIMETypes tells which manifest MIME types the destination supports.
-	// If an empty slice or nil is returned, then any MIME type can be tried for upload.
-	SupportedManifestMIMETypes() []string
-	// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
-	// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
-	SupportsSignatures(ctx context.Context) error
-	// DesiredLayerCompression indicates the kind of compression to apply on layers
-	DesiredLayerCompression() LayerCompression
-	// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be
-	// uploaded to the image destination, true otherwise.
-	AcceptsForeignLayerURLs() bool
-	// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime architecture and OS. False otherwise.
-	MustMatchRuntimeOS() bool
-	// IgnoresEmbeddedDockerReference() returns true iff the destination does not care about Image.EmbeddedDockerReferenceConflicts(),
-	// and would prefer to receive an unmodified manifest instead of one modified for the destination.
-	// Does not make a difference if Reference().DockerReference() is nil.
-	IgnoresEmbeddedDockerReference() bool
-
-	// PutBlob writes contents of stream and returns data representing the result.
-	// inputInfo.Digest can be optionally provided if known; if provided, and stream is read to the end without error, the digest MUST match the stream contents.
-	// inputInfo.Size is the expected length of stream, if known.
-	// inputInfo.MediaType describes the blob format, if known.
-	// May update cache.
-	// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
-	// to any other readers for download using the supplied digest.
-	// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
-	PutBlob(ctx context.Context, stream io.Reader, inputInfo BlobInfo, cache BlobInfoCache, isConfig bool) (BlobInfo, error)
-	// HasThreadSafePutBlob indicates whether PutBlob can be executed concurrently.
-	HasThreadSafePutBlob() bool
-	// TryReusingBlob checks whether the transport already contains, or can efficiently reuse, a blob, and if so, applies it to the current destination
-	// (e.g. if the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree).
-	// info.Digest must not be empty.
-	// If canSubstitute, TryReusingBlob can use an equivalent of the desired blob; in that case the returned info may not match the input.
-	// If the blob has been successfully reused, returns (true, info, nil); info must contain at least a digest and size, and may
-	// include CompressionOperation and CompressionAlgorithm fields to indicate that a change to the compression type should be
-	// reflected in the manifest that will be written.
-	// If the transport can not reuse the requested blob, TryReusingBlob returns (false, {}, nil); it returns a non-nil error only on an unexpected failure.
-	// May use and/or update cache.
-	TryReusingBlob(ctx context.Context, info BlobInfo, cache BlobInfoCache, canSubstitute bool) (bool, BlobInfo, error)
-	// PutManifest writes manifest to the destination.
-	// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write the manifest for
-	// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
-	// It is expected but not enforced that the instanceDigest, when specified, matches the digest of `manifest` as generated
-	// by `manifest.Digest()`.
-	// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
-	// If the destination is in principle available but refuses this manifest type (e.g. it does not recognize the schema),
-	// while it may accept a different manifest type, the returned error must be a ManifestTypeRejectedError.
-	PutManifest(ctx context.Context, manifest []byte, instanceDigest *digest.Digest) error
-	// PutSignatures writes a set of signatures to the destination.
-	// If instanceDigest is not nil, it contains a digest of the specific manifest instance to write or overwrite the signatures for
-	// (when the primary manifest is a manifest list); this should always be nil if the primary manifest is not a manifest list.
-	// MUST be called after PutManifest (signatures may reference manifest contents).
-	PutSignatures(ctx context.Context, signatures [][]byte, instanceDigest *digest.Digest) error
-	// Commit marks the process of storing the image as successful and asks for the image to be persisted.
-	// unparsedToplevel contains data about the top-level manifest of the source (which may be a single-arch image or a manifest list
-	// if PutManifest was only called for the single-arch image with instanceDigest == nil), primarily to allow lookups by the
-	// original manifest list digest, if desired.
-	// WARNING: This does not have any transactional semantics:
-	// - Uploaded data MAY be visible to others before Commit() is called
-	// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
-	Commit(ctx context.Context, unparsedToplevel UnparsedImage) error
-}
-
-// ManifestTypeRejectedError is returned by ImageDestination.PutManifest if the destination is in principle available,
-// but refuses specifically this manifest type, while it may accept a different manifest type.
-type ManifestTypeRejectedError struct { // We only use a struct to allow a type assertion, without limiting the contents of the error otherwise.
-	Err error
-}
-
-func (e ManifestTypeRejectedError) Error() string {
-	return e.Err.Error()
-}
-
-// UnparsedImage is an Image-to-be; until it is verified and accepted, it only carries its identity and caches manifest and signature blobs.
-// Thus, an UnparsedImage can be created from an ImageSource simply by fetching blobs without interpreting them,
-// allowing cryptographic signature verification to happen first, before even fetching the manifest, or parsing anything else.
-// This also makes the UnparsedImage→Image conversion an explicitly visible step.
-//
-// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
-//
-// The UnparsedImage must not be used after the underlying ImageSource is Close()d.
-type UnparsedImage interface {
-	// Reference returns the reference used to set up this source, _as specified by the user_
-	// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
-	Reference() ImageReference
-	// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
-	Manifest(ctx context.Context) ([]byte, string, error)
-	// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
-	Signatures(ctx context.Context) ([][]byte, error)
-}
-
-// Image is the primary API for inspecting properties of images.
-// An Image is based on a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance.
-//
-// The Image must not be used after the underlying ImageSource is Close()d.
-type Image interface {
-	// Note that Reference may return nil in the return value of UpdatedImage!
-	UnparsedImage
-	// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
-	// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
-	ConfigInfo() BlobInfo
-	// ConfigBlob returns the blob described by ConfigInfo, if ConfigInfo().Digest != ""; nil otherwise.
-	// The result is cached; it is OK to call this however often you need.
-	ConfigBlob(context.Context) ([]byte, error)
-	// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
-	// layers in the resulting configuration isn't guaranteed to be returned due to how
-	// old image manifests work (docker v2s1 especially).
-	OCIConfig(context.Context) (*v1.Image, error)
-	// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
-	// The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
-	// WARNING: The list may contain duplicates, and they are semantically relevant.
-	LayerInfos() []BlobInfo
-	// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer blobsums that are listed in the image's manifest.
-	// The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
-	// WARNING: The list may contain duplicates, and they are semantically relevant.
-	LayerInfosForCopy(context.Context) ([]BlobInfo, error)
-	// EmbeddedDockerReferenceConflicts reports whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
-	// It returns false if the manifest does not embed a Docker reference.
-	// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
-	EmbeddedDockerReferenceConflicts(ref reference.Named) bool
-	// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
-	Inspect(context.Context) (*ImageInspectInfo, error)
-	// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
-	// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive
-	// (most importantly it forces us to download the full layers even if they are already present at the destination).
-	UpdatedImageNeedsLayerDiffIDs(options ManifestUpdateOptions) bool
-	// UpdatedImage returns a types.Image modified according to options.
-	// Everything in options.InformationOnly should be provided, other fields should be set only if a modification is desired.
-	// This does not change the state of the original Image object.
-	// The returned error will be a manifest.ManifestLayerCompressionIncompatibilityError if
-	// manifests of type options.ManifestMIMEType can not include layers that are compressed
-	// in accordance with the CompressionOperation and CompressionAlgorithm specified in one
-	// or more options.LayerInfos items, though retrying with a different
-	// options.ManifestMIMEType or with different CompressionOperation+CompressionAlgorithm
-	// values might succeed.
-	UpdatedImage(ctx context.Context, options ManifestUpdateOptions) (Image, error)
-	// SupportsEncryption returns an indicator that the image supports encryption
-	//
-	// Deprecated: Initially used to determine if a manifest can be copied from a source manifest type since
-	// the process of updating a manifest between different manifest types was to update then convert.
-	// This resulted in some fields in the update being lost. This has been fixed by: https://github.com/containers/image/pull/836
-	SupportsEncryption(ctx context.Context) bool
-	// Size returns an approximation of the amount of disk space which is consumed by the image in its current
-	// location. If the size is not known, -1 will be returned.
-	Size() (int64, error)
-}
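An editorial aside: the Image interface above is normally reached through ImageReference.NewImage, as exercised by the util/image-canonical-ref tool this PR updates. A minimal sketch, assuming the post-migration go.podman.io/image/v5 import paths and a placeholder image name:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"go.podman.io/image/v5/docker"
	"go.podman.io/image/v5/types"
)

func main() {
	ctx := context.Background()
	// docker.ParseReference takes the reference without the "docker:" prefix.
	// The image name is an example only.
	ref, err := docker.ParseReference("//quay.io/libpod/alpine:latest")
	if err != nil {
		log.Fatal(err)
	}

	img, err := ref.NewImage(ctx, &types.SystemContext{}) // returns an ImageCloser
	if err != nil {
		log.Fatal(err)
	}
	defer img.Close() // required by the ImageCloser contract

	info, err := img.Inspect(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("architecture:", info.Architecture, "layers:", len(info.Layers))
}
```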
-
-// ImageCloser is an Image with a Close() method which must be called by the user.
-// This is returned by ImageReference.NewImage, which transparently instantiates a types.ImageSource,
-// to ensure that the ImageSource is closed.
-type ImageCloser interface {
-	Image
-	// Close removes resources associated with an initialized ImageCloser.
-	Close() error
-}
-
-// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedImage
-type ManifestUpdateOptions struct {
-	LayerInfos              []BlobInfo // Complete BlobInfos (size+digest+urls+annotations) which should replace the originals, in order (the root layer first, and then successive layered layers). BlobInfos' MediaType fields are ignored.
-	EmbeddedDockerReference reference.Named
-	ManifestMIMEType        string
-	// The values below are NOT requests to modify the image; they provide optional context which may or may not be used.
-	InformationOnly ManifestUpdateInformation
-}
-
-// ManifestUpdateInformation is a component of ManifestUpdateOptions, named here
-// only to make writing struct literals possible.
-type ManifestUpdateInformation struct {
-	Destination  ImageDestination // and yes, UpdatedImage may write to Destination (see the schema2 → schema1 conversion logic in image/docker_schema2.go)
-	LayerInfos   []BlobInfo       // Complete BlobInfos (size+digest) which have been uploaded, in order (the root layer first, and then successive layered layers)
-	LayerDiffIDs []digest.Digest  // Digest values for the _uncompressed_ contents of the blobs which have been uploaded, in the same order.
-}
-
-// ImageInspectInfo is a set of metadata describing Docker images, primarily their manifest and configuration.
-// The Tag field is a legacy field which is here just for the Docker v2s1 manifest. It won't be supported
-// for other manifest types.
-type ImageInspectInfo struct {
-	Tag           string
-	Created       *time.Time
-	DockerVersion string
-	Labels        map[string]string
-	Architecture  string
-	Variant       string
-	Os            string
-	Layers        []string
-	LayersData    []ImageInspectLayer
-	Env           []string
-	Author        string
-}
-
-// ImageInspectLayer is a set of metadata describing an image layer's details
-type ImageInspectLayer struct {
-	MIMEType    string // "" if unknown.
-	Digest      digest.Digest
-	Size        int64 // -1 if unknown.
-	Annotations map[string]string
-}
-
-// DockerAuthConfig contains authorization information for connecting to a registry.
-// The values of Username and Password can be empty when accessing the registry anonymously.
-type DockerAuthConfig struct {
-	Username string
-	Password string
-	// IdentityToken can be used as a refresh_token in place of username and
-	// password to obtain the bearer/access token in oauth2 flow. If identity
-	// token is set, password should not be set.
-	// Ref: https://docs.docker.com/registry/spec/auth/oauth/
-	IdentityToken string
-}
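The three-state OptionalBool defined just below distinguishes "unset" from an explicit true/false. A small usage sketch, assuming the go.podman.io/image/v5/types import path and using the DockerInsecureSkipTLSVerify field from the SystemContext further down:

```go
package main

import (
	"fmt"

	"go.podman.io/image/v5/types"
)

func main() {
	sys := &types.SystemContext{}
	// The zero value is OptionalBoolUndefined, so callers can tell
	// "the user said nothing" apart from an explicit false.
	fmt.Println(sys.DockerInsecureSkipTLSVerify == types.OptionalBoolUndefined) // true

	// NewOptionalBool records an explicit user choice.
	sys.DockerInsecureSkipTLSVerify = types.NewOptionalBool(false)
	fmt.Println(sys.DockerInsecureSkipTLSVerify == types.OptionalBoolFalse) // true
}
```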
-// OptionalBool is a boolean with an additional undefined value, which is meant
-// to be used in the context of user input to distinguish between a
-// user-specified value and a default value.
-type OptionalBool byte
-
-const (
-	// OptionalBoolUndefined indicates that the OptionalBoolean hasn't been written.
-	OptionalBoolUndefined OptionalBool = iota
-	// OptionalBoolTrue represents the boolean true.
-	OptionalBoolTrue
-	// OptionalBoolFalse represents the boolean false.
-	OptionalBoolFalse
-)
-
-// NewOptionalBool converts the input bool into either OptionalBoolTrue or
-// OptionalBoolFalse. The function is meant to avoid boilerplate code for users.
-func NewOptionalBool(b bool) OptionalBool {
-	o := OptionalBoolFalse
-	if b {
-		o = OptionalBoolTrue
-	}
-	return o
-}
-
-// ShortNameMode defines the mode of short-name resolution.
-//
-// The use of unqualified-search registries entails an ambiguity as it's
-// unclear from which registry a given image, referenced by a short name, may
-// be pulled.
-//
-// The ShortNameMode type defines how short names should resolve.
-type ShortNameMode int
-
-const (
-	ShortNameModeInvalid ShortNameMode = iota
-	// Use all configured unqualified-search registries without prompting
-	// the user.
-	ShortNameModeDisabled
-	// If stdout and stdin are a TTY, prompt the user to select a configured
-	// unqualified-search registry. Otherwise, use all configured
-	// unqualified-search registries.
-	//
-	// Note that if only one unqualified-search registry is set, it will be
-	// used without prompting.
-	ShortNameModePermissive
-	// Always prompt the user to select a configured unqualified-search
-	// registry. Throw an error if stdout or stdin is not a TTY as
-	// prompting isn't possible.
-	//
-	// Note that if only one unqualified-search registry is set, it will be
-	// used without prompting.
-	ShortNameModeEnforcing
-)
-
-// SystemContext allows parameterizing access to implicitly-accessed resources,
-// like configuration files in /etc and users' login state in their home directory.
-// Various components can share the same field only if their semantics is exactly
-// the same; if in doubt, add a new field.
-// It is always OK to pass nil instead of a SystemContext.
-type SystemContext struct {
-	// If not "", prefixed to any absolute paths used by default by the library (e.g. in /etc/).
-	// Not used for any of the more specific path overrides available in this struct.
-	// Not used for any paths specified by users in config files (even if the location of the config file _was_ affected by it).
-	// NOTE: If this is set, environment-variable overrides of paths are ignored (to keep the semantics simple: to create an /etc replacement,
-	// just set RootForImplicitAbsolutePaths, and there is no need to worry about the environment.)
-	// NOTE: This does NOT affect paths starting with $HOME.
-	RootForImplicitAbsolutePaths string
-
-	// === Global configuration overrides ===
-	// If not "", overrides the system's default path for signature.Policy configuration.
-	SignaturePolicyPath string
-	// If not "", overrides the system's default path for registries.d (Docker signature storage configuration)
-	RegistriesDirPath string
-	// Path to the system-wide registries configuration file
-	SystemRegistriesConfPath string
-	// Path to the system-wide registries configuration directory
-	SystemRegistriesConfDirPath string
-	// Path to the user-specific short-names configuration file
-	UserShortNameAliasConfPath string
-	// If set, short-name resolution in pkg/shortnames must follow the specified mode
-	ShortNameMode *ShortNameMode
-	// If set, short names will resolve in pkg/shortnames to docker.io only, and unqualified-search registries and
-	// short-name aliases in registries.conf are ignored. Note that this field is only intended to help enforce
-	// resolving to Docker Hub in the Docker-compatible REST API of Podman; it should never be used outside this
-	// specific context.
-	PodmanOnlyShortNamesIgnoreRegistriesConfAndForceDockerHub bool
-	// If not "", overrides the default path for the registry authentication file, but only for new-format files
-	AuthFilePath string
-	// if not "", overrides the default path for the registry authentication file, but with the legacy format;
-	// the code currently will by default look for legacy format files like .dockercfg in the $HOME dir;
-	// but in addition to the home dir, openshift may mount .dockercfg files (via secret mount)
-	// in locations other than the home dir; openshift components should then set this field in those cases;
-	// this field is ignored if `AuthFilePath` is set (we favor the newer format);
-	// only reading of this data is supported;
-	LegacyFormatAuthFilePath string
-	// If set, a path to a Docker-compatible "config.json" file containing credentials; and no other files are processed.
-	// This must not be set if AuthFilePath is set.
-	// Only credentials and credential helpers in this file are processed, not any other configuration in this file.
-	DockerCompatAuthFilePath string
-	// If not "", overrides the use of platform.GOARCH when choosing an image or verifying architecture match.
-	ArchitectureChoice string
-	// If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match.
-	OSChoice string
-	// If not "", overrides the use of detected ARM platform variant when choosing an image or verifying variant match.
-	VariantChoice string
-	// If not "", overrides the system's default directory containing a blob info cache.
-	BlobInfoCacheDir string
-	// Additional tags when creating or copying a docker-archive.
-	DockerArchiveAdditionalTags []reference.NamedTagged
-	// If not "", overrides the temporary directory to use for storing big files
-	BigFilesTemporaryDir string
-
-	// === OCI.Transport overrides ===
-	// If not "", a directory containing a CA certificate (ending with ".crt"),
-	// a client certificate (ending with ".cert") and a client certificate key
-	// (ending with ".key") used when downloading OCI image layers.
-	OCICertPath string
-	// Allow downloading OCI image layers over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
-	OCIInsecureSkipTLSVerify bool
-	// If not "", use a shared directory for storing blobs rather than within OCI layouts
-	OCISharedBlobDirPath string
-	// Allow uncompressed image layers when writing OCI image layers
-	OCIAcceptUncompressedLayers bool
-
-	// === docker.Transport overrides ===
-	// If not "", a directory containing a CA certificate (ending with ".crt"),
-	// a client certificate (ending with ".cert") and a client certificate key
-	// (ending with ".key") used when talking to a container registry.
-	DockerCertPath string
-	// If not "", overrides the system’s default path for a directory containing host[:port] subdirectories with the same structure as DockerCertPath above.
-	// Ignored if DockerCertPath is non-empty.
-	DockerPerHostCertDirPath string
-	// Allow contacting container registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
-	DockerInsecureSkipTLSVerify OptionalBool
-	// if nil, the library tries to parse ~/.docker/config.json to retrieve credentials
-	// Ignored if DockerBearerRegistryToken is non-empty.
-	DockerAuthConfig *DockerAuthConfig
-	// if not "", the library uses this registry token to authenticate to the registry
-	DockerBearerRegistryToken string
-	// if not "", a User-Agent header is added to each request when contacting a registry.
-	DockerRegistryUserAgent string
-	// if true, a V1 ping attempt isn't done to give users a better error. Default is false.
-	// Note that this field is used mainly to integrate containers/image into projectatomic/docker
-	// in order to not break any existing docker's integration tests.
-	// Deprecated: The V1 container registry detection is no longer performed, so setting this flag has no effect.
-	DockerDisableV1Ping bool
-	// If true, dockerImageDestination.SupportedManifestMIMETypes will omit the Schema1 media types from the supported list
-	DockerDisableDestSchema1MIMETypes bool
-	// If true, the physical pull source of docker transport images is logged at info level
-	DockerLogMirrorChoice bool
-	// Directory to use for OSTree temporary files
-	//
-	// Deprecated: The OSTree transport has been removed.
-	OSTreeTmpDirPath string
-	// If true, all blobs will have precomputed digests to ensure layers are not uploaded that already exist on the registry.
-	// Note that this requires writing blobs to temporary files, and takes more time than the default behavior,
-	// when the digest for a blob is unknown.
-	DockerRegistryPushPrecomputeDigests bool
-	// DockerProxyURL specifies the proxy configuration (like socks5://username:password@ip:port)
-	DockerProxyURL *url.URL
-
-	// === docker/daemon.Transport overrides ===
-	// A directory containing a CA certificate (ending with ".crt"),
-	// a client certificate (ending with ".cert") and a client certificate key
-	// (ending with ".key") used when talking to a Docker daemon.
-	DockerDaemonCertPath string
-	// The hostname or IP of the Docker daemon. If not set (aka ""), client.DefaultDockerHost is assumed.
-	DockerDaemonHost string
-	// Used to skip TLS verification, off by default. To take effect, DockerDaemonCertPath needs to be specified as well.
-	DockerDaemonInsecureSkipTLSVerify bool
-
-	// === dir.Transport overrides ===
-	// DirForceCompress compresses the image layers if set to true
-	DirForceCompress bool
-	// DirForceDecompress decompresses the image layers if set to true
-	DirForceDecompress bool
-
-	// CompressionFormat is the format to use for the compression of the blobs
-	CompressionFormat *compression.Algorithm
-	// CompressionLevel specifies what compression level is used
-	CompressionLevel *int
-}
-
-// ProgressEvent is the type of events a progress reader can produce
-// Warning: new event types may be added any time.
-type ProgressEvent uint
-
-const (
-	// ProgressEventNewArtifact will be fired on progress reader setup
-	ProgressEventNewArtifact ProgressEvent = iota
-
-	// ProgressEventRead indicates that the artifact download is currently in
-	// progress
-	ProgressEventRead
-
-	// ProgressEventDone is fired when the data transfer has been finished for
-	// the specific artifact
-	ProgressEventDone
-
-	// ProgressEventSkipped is fired when the artifact has been skipped because
-	// it's already available at the destination
-	ProgressEventSkipped
-)
-
-// ProgressProperties is used to pass information from the copy code to a monitor which
-// can use the real-time information to produce output or react to changes.
-type ProgressProperties struct { - // The event indicating what - Event ProgressEvent - - // The artifact which has been updated in this interval - Artifact BlobInfo - - // The currently downloaded size in bytes - // Increases from 0 to the final Artifact size - Offset uint64 - - // The additional offset which has been downloaded inside the last update - // interval. Will be reset after each ProgressEventRead event. - OffsetUpdate uint64 -} diff --git a/vendor/github.com/containers/image/v5/version/version.go b/vendor/github.com/containers/image/v5/version/version.go deleted file mode 100644 index f494845b8b..0000000000 --- a/vendor/github.com/containers/image/v5/version/version.go +++ /dev/null @@ -1,18 +0,0 @@ -package version - -import "fmt" - -const ( - // VersionMajor is for an API incompatible changes - VersionMajor = 5 - // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 36 - // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 2 - - // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "" -) - -// Version is the specification version that the package types support. -var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev) diff --git a/vendor/github.com/containers/storage/AUTHORS b/vendor/github.com/containers/storage/AUTHORS deleted file mode 100644 index 129dd39692..0000000000 --- a/vendor/github.com/containers/storage/AUTHORS +++ /dev/null @@ -1,1523 +0,0 @@ -# This file lists all individuals having contributed content to the repository. -# For how it is generated, see `hack/generate-authors.sh`. - -Aanand Prasad -Aaron Davidson -Aaron Feng -Aaron Huslage -Aaron Lehmann -Aaron Welch -Abel Muiño -Abhijeet Kasurde -Abhinav Ajgaonkar -Abhishek Chanda -Abin Shahab -Adam Miller -Adam Singer -Aditi Rajagopal -Aditya -Adria Casas -Adrian Mouat -Adrian Oprea -Adrien Folie -Adrien Gallouët -Ahmed Kamal -Ahmet Alp Balkan -Aidan Feldman -Aidan Hobson Sayers -AJ Bowen -Ajey Charantimath -ajneu -Akihiro Suda -Al Tobey -alambike -Alan Scherger -Alan Thompson -Albert Callarisa -Albert Zhang -Aleksa Sarai -Aleksandrs Fadins -Alena Prokharchyk -Alessandro Boch -Alessio Biancalana -Alex Chan -Alex Crawford -Alex Ellis -Alex Gaynor -Alex Samorukov -Alex Warhawk -Alexander Artemenko -Alexander Boyd -Alexander Larsson -Alexander Morozov -Alexander Shopov -Alexandre Beslic -Alexandre González -Alexandru Sfirlogea -Alexey Guskov -Alexey Kotlyarov -Alexey Shamrin -Alexis THOMAS -Ali Dehghani -Allen Madsen -Allen Sun -almoehi -Alvin Richards -amangoel -Amen Belayneh -Amit Bakshi -Amit Krishnan -Amy Lindburg -Anand Patil -AnandkumarPatel -Anatoly Borodin -Anchal Agrawal -Anders Janmyr -Andre Dublin <81dublin@gmail.com> -Andre Granovsky -Andrea Luzzardi -Andrea Turli -Andreas Köhler -Andreas Savvides -Andreas Tiefenthaler -Andrew C. 
Bodine -Andrew Clay Shafer -Andrew Duckworth -Andrew France -Andrew Gerrand -Andrew Guenther -Andrew Kuklewicz -Andrew Macgregor -Andrew Macpherson -Andrew Martin -Andrew Munsell -Andrew Weiss -Andrew Williams -Andrews Medina -Andrey Petrov -Andrey Stolbovsky -André Martins -andy -Andy Chambers -andy diller -Andy Goldstein -Andy Kipp -Andy Rothfusz -Andy Smith -Andy Wilson -Anes Hasicic -Anil Belur -Ankush Agarwal -Anonmily -Anthon van der Neut -Anthony Baire -Anthony Bishopric -Anthony Dahanne -Anton Löfgren -Anton Nikitin -Anton Polonskiy -Anton Tiurin -Antonio Murdaca -Antony Messerli -Anuj Bahuguna -Anusha Ragunathan -apocas -ArikaChen -Arnaud Porterie -Arthur Barr -Arthur Gautier -Artur Meyster -Arun Gupta -Asbjørn Enge -averagehuman -Avi Das -Avi Miller -ayoshitake -Azat Khuyiyakhmetov -Bardia Keyoumarsi -Barnaby Gray -Barry Allard -Bartłomiej Piotrowski -Bastiaan Bakker -bdevloed -Ben Firshman -Ben Golub -Ben Hall -Ben Sargent -Ben Severson -Ben Toews -Ben Wiklund -Benjamin Atkin -Benoit Chesneau -Bernerd Schaefer -Bert Goethals -Bharath Thiruveedula -Bhiraj Butala -Bill W -bin liu -Blake Geno -Boaz Shuster -bobby abbott -boucher -Bouke Haarsma -Boyd Hemphill -boynux -Bradley Cicenas -Bradley Wright -Brandon Liu -Brandon Philips -Brandon Rhodes -Brendan Dixon -Brent Salisbury -Brett Higgins -Brett Kochendorfer -Brian (bex) Exelbierd -Brian Bland -Brian DeHamer -Brian Dorsey -Brian Flad -Brian Goff -Brian McCallister -Brian Olsen -Brian Shumate -Brian Torres-Gil -Brian Trump -Brice Jaglin -Briehan Lombaard -Bruno Bigras -Bruno Binet -Bruno Gazzera -Bruno Renié -Bryan Bess -Bryan Boreham -Bryan Matsuo -Bryan Murphy -buddhamagnet -Burke Libbey -Byung Kang -Caleb Spare -Calen Pennington -Cameron Boehmer -Cameron Spear -Campbell Allen -Candid Dauth -Carl Henrik Lunde -Carl X. Su -Carlos Alexandro Becker -Carlos Sanchez -Carol Fager-Higgins -Cary -Casey Bisson -Cedric Davies -Cezar Sa Espinola -Chad Swenson -Chance Zibolski -Chander G -Charles Chan -Charles Hooper -Charles Law -Charles Lindsay -Charles Merriam -Charles Sarrazin -Charlie Lewis -Chase Bolt -ChaYoung You -Chen Chao -Chen Hanxiao -cheney90 -Chewey -Chia-liang Kao -chli -Cholerae Hu -Chris Alfonso -Chris Armstrong -Chris Dituri -Chris Fordham -Chris Khoo -Chris McKinnel -Chris Seto -Chris Snow -Chris St. Pierre -Chris Stivers -Chris Swan -Chris Wahl -Chris Weyl -chrismckinnel -Christian Berendt -Christian Böhme -Christian Persson -Christian Rotzoll -Christian Simon -Christian Stefanescu -ChristoperBiscardi -Christophe Mehay -Christophe Troestler -Christopher Currie -Christopher Jones -Christopher Latham -Christopher Rigor -Christy Perez -Chun Chen -Ciro S. 
Costa -Clayton Coleman -Clinton Kitson -Coenraad Loubser -Colin Dunklau -Colin Rice -Colin Walters -Collin Guarino -Colm Hally -companycy -Cory Forsyth -cressie176 -Cristian Staretu -cristiano balducci -Cruceru Calin-Cristian -Cyril F -Daan van Berkel -Daehyeok Mun -Dafydd Crosby -dalanlan -Damien Nadé -Damien Nozay -Damjan Georgievski -Dan Anolik -Dan Buch -Dan Cotora -Dan Griffin -Dan Hirsch -Dan Keder -Dan Levy -Dan McPherson -Dan Stine -Dan Walsh -Dan Williams -Daniel Antlinger -Daniel Exner -Daniel Farrell -Daniel Garcia -Daniel Gasienica -Daniel Hiltgen -Daniel Menet -Daniel Mizyrycki -Daniel Nephin -Daniel Norberg -Daniel Nordberg -Daniel Robinson -Daniel S -Daniel Von Fange -Daniel YC Lin -Daniel Zhang -Daniel, Dao Quang Minh -Danny Berger -Danny Yates -Darren Coxall -Darren Shepherd -Darren Stahl -Dave Barboza -Dave Henderson -Dave MacDonald -Dave Tucker -David Anderson -David Calavera -David Corking -David Cramer -David Currie -David Davis -David Gageot -David Gebler -David Lawrence -David Mackey -David Mat -David Mcanulty -David Pelaez -David R. Jenni -David Röthlisberger -David Sheets -David Sissitka -David Xia -David Young -Davide Ceretti -Dawn Chen -dcylabs -decadent -deed02392 -Deng Guangxing -Deni Bertovic -Denis Gladkikh -Denis Ollier -Dennis Docter -Derek -Derek -Derek Ch -Derek McGowan -Deric Crago -Deshi Xiao -devmeyster -Devvyn Murphy -Dharmit Shah -Dieter Reuter -Dima Stopel -Dimitri John Ledkov -Dimitry Andric -Dinesh Subhraveti -Diogo Monica -DiuDiugirl -Djibril Koné -dkumor -Dmitri Logvinenko -Dmitry Demeshchuk -Dmitry Gusev -Dmitry V. Krivenok -Dmitry Vorobev -Dolph Mathews -Dominik Finkbeiner -Dominik Honnef -Don Kirkby -Don Kjer -Don Spaulding -Donald Huang -Dong Chen -Donovan Jones -Doug Davis -Doug MacEachern -Doug Tangren -Dr Nic Williams -dragon788 -Dražen Lučanin -Dustin Sallings -Ed Costello -Edmund Wagner -Eiichi Tsukata -Eike Herzbach -Eivind Uggedal -Elan Ruusamäe -Elias Probst -Elijah Zupancic -eluck -Elvir Kuric -Emil Hernvall -Emily Maier -Emily Rose -Emir Ozer -Enguerran -Eohyung Lee -Eric Hanchrow -Eric Lee -Eric Myhre -Eric Paris -Eric Rafaloff -Eric Rosenberg -Eric Sage -Eric Windisch -Eric Yang -Eric-Olivier Lamey -Erik Bray -Erik Dubbelboer -Erik Hollensbe -Erik Inge Bolsø -Erik Kristensen -Erik Weathers -Erno Hopearuoho -Erwin van der Koogh -Euan -Eugene Yakubovich -eugenkrizo -evalle -Evan Allrich -Evan Carmi -Evan Hazlett -Evan Krall -Evan Phoenix -Evan Wies -Evgeny Vereshchagin -Ewa Czechowska -Eystein Måløy Stenberg -ezbercih -Fabiano Rosas -Fabio Falci -Fabio Rehm -Fabrizio Regini -Fabrizio Soppelsa -Faiz Khan -falmp -Fangyuan Gao <21551127@zju.edu.cn> -Fareed Dudhia -Fathi Boudra -Federico Gimenez -Felix Geisendörfer -Felix Hupfeld -Felix Rabe -Felix Schindler -Ferenc Szabo -Fernando -Fero Volar -Filipe Brandenburger -Filipe Oliveira -fl0yd -Flavio Castelli -FLGMwt -Florian -Florian Klein -Florian Maier -Florian Weingarten -Florin Asavoaie -Francesc Campoy -Francisco Carriedo -Francisco Souza -Frank Groeneveld -Frank Herrmann -Frank Macreery -Frank Rosquin -Fred Lifton -Frederick F. 
Kautz IV -Frederik Loeffert -Frederik Nordahl Jul Sabroe -Freek Kalter -fy2462 -Félix Baylac-Jacqué -Félix Cantournet -Gabe Rosenhouse -Gabor Nagy -Gabriel Monroy -GabrielNicolasAvellaneda -Galen Sampson -Gareth Rushgrove -Garrett Barboza -Gaurav -gautam, prasanna -GennadySpb -Geoffrey Bachelet -George MacRorie -George Xie -Georgi Hristozov -Gereon Frey -German DZ -Gert van Valkenhoef -Gianluca Borello -Gildas Cuisinier -gissehel -Giuseppe Mazzotta -Gleb Fotengauer-Malinovskiy -Gleb M Borisov -Glyn Normington -GoBella -Goffert van Gool -Gosuke Miyashita -Gou Rao -Govinda Fichtner -Grant Reaber -Graydon Hoare -Greg Fausak -Greg Thornton -grossws -grunny -gs11 -Guilhem Lettron -Guilherme Salgado -Guillaume Dufour -Guillaume J. Charmes -guoxiuyan -Gurjeet Singh -Guruprasad -gwx296173 -Günter Zöchbauer -Hans Kristian Flaatten -Hans Rødtang -Hao Shu Wei -Hao Zhang <21521210@zju.edu.cn> -Harald Albers -Harley Laue -Harold Cooper -Harry Zhang -He Simei -heartlock <21521209@zju.edu.cn> -Hector Castro -Henning Sprang -Hobofan -Hollie Teal -Hong Xu -hsinko <21551195@zju.edu.cn> -Hu Keping -Hu Tao -Huanzhong Zhang -Huayi Zhang -Hugo Duncan -Hugo Marisco <0x6875676f@gmail.com> -Hunter Blanks -huqun -Huu Nguyen -hyeongkyu.lee -hyp3rdino -Hyzhou <1187766782@qq.com> -Ian Babrou -Ian Bishop -Ian Bull -Ian Calvert -Ian Lee -Ian Main -Ian Truslove -Iavael -Icaro Seara -Igor Dolzhikov -Ilkka Laukkanen -Ilya Dmitrichenko -Ilya Gusev -ILYA Khlopotov -imre Fitos -inglesp -Ingo Gottwald -Isaac Dupree -Isabel Jimenez -Isao Jonas -Ivan Babrou -Ivan Fraixedes -Ivan Grcic -J Bruni -J. Nunn -Jack Danger Canty -Jacob Atzen -Jacob Edelman -Jake Champlin -Jake Moshenko -jakedt -James Allen -James Carey -James Carr -James DeFelice -James Harrison Fisher -James Kyburz -James Kyle -James Lal -James Mills -James Nugent -James Turnbull -Jamie Hannaford -Jamshid Afshar -Jan Keromnes -Jan Koprowski -Jan Pazdziora -Jan Toebes -Jan-Gerd Tenberge -Jan-Jaap Driessen -Jana Radhakrishnan -Januar Wayong -Jared Biel -Jared Hocutt -Jaroslaw Zabiello -jaseg -Jasmine Hegman -Jason Divock -Jason Giedymin -Jason Green -Jason Hall -Jason Heiss -Jason Livesay -Jason McVetta -Jason Plum -Jason Shepherd -Jason Smith -Jason Sommer -Jason Stangroome -jaxgeller -Jay -Jay -Jay Kamat -Jean-Baptiste Barth -Jean-Baptiste Dalido -Jean-Paul Calderone -Jean-Tiare Le Bigot -Jeff Anderson -Jeff Johnston -Jeff Lindsay -Jeff Mickey -Jeff Minard -Jeff Nickoloff -Jeff Welch -Jeffrey Bolle -Jeffrey Morgan -Jeffrey van Gogh -Jenny Gebske -Jeremy Grosser -Jeremy Price -Jeremy Qian -Jeremy Unruh -Jeroen Jacobs -Jesse Dearing -Jesse Dubay -Jessica Frazelle -Jezeniel Zapanta -jgeiger -Jhon Honce -Jian Zhang -jianbosun -Jilles Oldenbeuving -Jim Alateras -Jim Perrin -Jimmy Cuadra -Jimmy Puckett -jimmyxian -Jinsoo Park -Jiri Popelka -Jiří Župka -jjy -jmzwcn -Joe Beda -Joe Doliner -Joe Ferguson -Joe Gordon -Joe Shaw -Joe Van Dyk -Joel Friedly -Joel Handwell -Joel Hansson -Joel Wurtz -Joey Geiger -Joey Gibson -Joffrey F -Johan Euphrosine -Johan Rydberg -Johannes 'fish' Ziemke -John Costa -John Feminella -John Gardiner Myers -John Gossman -John Howard (VM) -John OBrien III -John Starks -John Tims -John Warwick -John Willis -Jon Wedaman -Jonas Pfenniger -Jonathan A. 
Sternberg -Jonathan Boulle -Jonathan Camp -Jonathan Dowland -Jonathan Lebon -Jonathan McCrohan -Jonathan Mueller -Jonathan Pares -Jonathan Rudenberg -Joost Cassee -Jordan -Jordan Arentsen -Jordan Sissel -Jordan Williams -Jose Diaz-Gonzalez -Joseph Anthony Pasquale Holsten -Joseph Hager -Joseph Kern -Josh -Josh Hawn -Josh Poimboeuf -Josiah Kiehl -José Tomás Albornoz -JP -jrabbit -Julian Taylor -Julien Barbier -Julien Bisconti -Julien Bordellier -Julien Dubois -Julien Pervillé -Julio Montes -Jun-Ru Chang -Jussi Nummelin -Justas Brazauskas -Justin Cormack -Justin Force -Justin Plock -Justin Simonelis -Justin Terry -Jyrki Puttonen -Jérôme Petazzoni -Jörg Thalheim -Kai Blin -Kai Qiang Wu(Kennan) -Kamil Domański -kamjar gerami -Kanstantsin Shautsou -Karan Lyons -Kareem Khazem -kargakis -Karl Grzeszczak -Karol Duleba -Katie McLaughlin -Kato Kazuyoshi -Katrina Owen -Kawsar Saiyeed -kayrus -Ke Xu -Keli Hu -Ken Cochrane -Ken ICHIKAWA -Kenfe-Mickael Laventure -Kenjiro Nakayama -Kent Johnson -Kevin "qwazerty" Houdebert -Kevin Clark -Kevin J. Lynagh -Kevin Menard -Kevin P. Kucharczyk -Kevin Shi -Kevin Wallace -Kevin Yap -kevinmeredith -Keyvan Fatehi -kies -Kim BKC Carlbacker -Kim Eik -Kimbro Staken -Kir Kolyshkin -Kiran Gangadharan -Kirill SIbirev -knappe -Kohei Tsuruta -Koichi Shiraishi -Konrad Kleine -Konstantin Pelykh -Krasimir Georgiev -Kristian Haugene -Kristina Zabunova -krrg -Kun Zhang -Kunal Kushwaha -Kyle Conroy -kyu -Lachlan Coote -Lai Jiangshan -Lajos Papp -Lakshan Perera -Lalatendu Mohanty -lalyos -Lance Chen -Lance Kinley -Lars Butler -Lars Kellogg-Stedman -Lars R. Damerow -Laszlo Meszaros -Laurent Erignoux -Laurie Voss -Leandro Siqueira -Lee, Meng-Han -leeplay -Lei Jitang -Len Weincier -Lennie -Leszek Kowalski -Levi Blackstone -Levi Gross -Lewis Marshall -Lewis Peckover -Liana Lo -Liang Mingqiang -Liang-Chi Hsieh -liaoqingwei -limsy -Lin Lu -LingFaKe -Linus Heckemann -Liran Tal -Liron Levin -Liu Bo -Liu Hua -LIZAO LI -Lloyd Dewolf -Lokesh Mandvekar -longliqiang88 <394564827@qq.com> -Lorenz Leutgeb -Lorenzo Fontana -Louis Opter -Luca Marturana -Luca Orlandi -Luca-Bogdan Grigorescu -Lucas Chan -Luis Martínez de Bartolomé Izquierdo -Lukas Waslowski -lukaspustina -Lukasz Zajaczkowski -lukemarsden -Lynda O'Leary -Lénaïc Huard -Ma Shimiao -Mabin -Madhav Puri -Madhu Venugopal -Mageee <21521230.zju.edu.cn> -Mahesh Tiyyagura -malnick -Malte Janduda -manchoz -Manfred Touron -Manfred Zabarauskas -mansinahar -Manuel Meurer -Manuel Woelker -mapk0y -Marc Abramowitz -Marc Kuo -Marc Tamsky -Marcelo Salazar -Marco Hennings -Marcus Farkas -Marcus Linke -Marcus Ramberg -Marek Goldmann -Marian Marinov -Marianna Tessel -Mario Loriedo -Marius Gundersen -Marius Sturm -Marius Voila -Mark Allen -Mark McGranaghan -Mark McKinstry -Mark West -Marko Mikulicic -Marko Tibold -Markus Fix -Martijn Dwars -Martijn van Oosterhout -Martin Honermeyer -Martin Kelly -Martin Mosegaard Amdisen -Martin Redmond -Mary Anthony -Masahito Zembutsu -Mason Malone -Mateusz Sulima -Mathias Monnerville -Mathieu Le Marec - Pasquet -Matt Apperson -Matt Bachmann -Matt Bentley -Matt Haggard -Matt McCormick -Matt Moore -Matt Robenolt -Matthew Heon -Matthew Mayer -Matthew Mueller -Matthew Riley -Matthias Klumpp -Matthias Kühnle -Matthias Rampke -Matthieu Hauglustaine -mattymo -mattyw -Mauricio Garavaglia -mauriyouth -Max Shytikov -Maxim Ivanov -Maxim Kulkin -Maxim Treskin -Maxime Petazzoni -Meaglith Ma -meejah -Megan Kostick -Mehul Kar -Mengdi Gao -Mert Yazıcıoğlu -Micah Zoltu -Michael A. 
Smith -Michael Bridgen -Michael Brown -Michael Chiang -Michael Crosby -Michael Currie -Michael Friis -Michael Gorsuch -Michael Grauer -Michael Holzheu -Michael Hudson-Doyle -Michael Huettermann -Michael Käufl -Michael Neale -Michael Prokop -Michael Scharf -Michael Stapelberg -Michael Steinert -Michael Thies -Michael West -Michal Fojtik -Michal Gebauer -Michal Jemala -Michal Minar -Michaël Pailloncy -Michał Czeraszkiewicz -Michiel@unhosted -Miguel Angel Fernández -Miguel Morales -Mihai Borobocea -Mihuleacc Sergiu -Mike Brown -Mike Chelen -Mike Danese -Mike Dillon -Mike Dougherty -Mike Gaffney -Mike Goelzer -Mike Leone -Mike MacCana -Mike Naberezny -Mike Snitzer -mikelinjie <294893458@qq.com> -Mikhail Sobolev -Miloslav Trmač -mingqing -Mingzhen Feng -Mitch Capper -mlarcher -Mohammad Banikazemi -Mohammed Aaqib Ansari -Mohit Soni -Morgan Bauer -Morgante Pell -Morgy93 -Morten Siebuhr -Morton Fox -Moysés Borges -mqliang -Mrunal Patel -msabansal -mschurenko -muge -Mustafa Akın -Muthukumar R -Máximo Cuadros -Médi-Rémi Hashim -Nahum Shalman -Nakul Pathak -Nalin Dahyabhai -Nan Monnand Deng -Naoki Orii -Natalie Parker -Natanael Copa -Nate Brennand -Nate Eagleson -Nate Jones -Nathan Hsieh -Nathan Kleyn -Nathan LeClaire -Nathan McCauley -Nathan Williams -Neal McBurnett -Nelson Chen -Nghia Tran -Niall O'Higgins -Nicholas E. Rabenau -Nick Irvine -Nick Parker -Nick Payne -Nick Stenning -Nick Stinemates -Nicolas Borboën -Nicolas De loof -Nicolas Dudebout -Nicolas Goy -Nicolas Kaiser -Nicolás Hock Isaza -Nigel Poulton -NikolaMandic -nikolas -Nirmal Mehta -Nishant Totla -NIWA Hideyuki -noducks -Nolan Darilek -nponeccop -Nuutti Kotivuori -nzwsch -O.S. Tezer -objectified -OddBloke -odk- -Oguz Bilgic -Oh Jinkyun -Ohad Schneider -Ole Reifschneider -Oliver Neal -Olivier Gambier -Olle Jonsson -Oriol Francès -Otto Kekäläinen -oyld -ozlerhakan -paetling -pandrew -panticz -Paolo G. Giarrusso -Pascal Borreli -Pascal Hartig -Patrick Devine -Patrick Hemmer -Patrick Stapleton -pattichen -Paul -paul -Paul Annesley -Paul Bellamy -Paul Bowsher -Paul Hammond -Paul Jimenez -Paul Lietar -Paul Liljenberg -Paul Morie -Paul Nasrat -Paul Weaver -Pavel Lobashov -Pavel Pospisil -Pavel Sutyrin -Pavel Tikhomirov -Pavlos Ratis -Peeyush Gupta -Peggy Li -Pei Su -Penghan Wang -perhapszzy@sina.com -Peter Bourgon -Peter Braden -Peter Choi -Peter Dave Hello -Peter Edge -Peter Ericson -Peter Esbensen -Peter Malmgren -Peter Salvatore -Peter Volpe -Peter Waller -Phil -Phil Estes -Phil Spitler -Philip Monroe -Philipp Wahala -Philipp Weissensteiner -Phillip Alexander -pidster -Piergiuliano Bossi -Pierre -Pierre Carrier -Pierre Wacrenier -Pierre-Alain RIVIERE -Piotr Bogdan -pixelistik -Porjo -Poul Kjeldager Sørensen -Pradeep Chhetri -Prasanna Gautam -Prayag Verma -Przemek Hejman -pysqz -qg <1373319223@qq.com> -qhuang -Qiang Huang -qq690388648 <690388648@qq.com> -Quentin Brossard -Quentin Perez -Quentin Tayssier -r0n22 -Rafal Jeczalik -Rafe Colton -Raghavendra K T -Raghuram Devarakonda -Rajat Pandit -Rajdeep Dua -Ralle -Ralph Bean -Ramkumar Ramachandra -Ramon van Alteren -Ray Tsang -ReadmeCritic -Recursive Madman -Regan McCooey -Remi Rampin -Renato Riccieri Santos Zannon -resouer -rgstephens -Rhys Hiltner -Rich Seymour -Richard -Richard Burnison -Richard Harvey -Richard Metzler -Richard Scothern -Richo Healey -Rick Bradley -Rick van de Loo -Rick Wieman -Rik Nijessen -Riku Voipio -Riley Guerin -Ritesh H Shukla -Riyaz Faizullabhoy -Rob Vesse -Robert Bachmann -Robert Bittle -Robert Obryk -Robert Stern -Robert Wallis -Roberto G. 
Hashioka -Robin Naundorf -Robin Schneider -Robin Speekenbrink -robpc -Rodolfo Carvalho -Rodrigo Vaz -Roel Van Nyen -Roger Peppe -Rohit Jnagal -Rohit Kadam -Roland Huß -Roland Kammerer -Roland Moriz -Roma Sokolov -Roman Strashkin -Ron Smits -root -root -root -root -Rory Hunter -Rory McCune -Ross Boucher -Rovanion Luckey -Rozhnov Alexandr -rsmoorthy -Rudolph Gottesheim -Rui Lopes -Ryan Anderson -Ryan Aslett -Ryan Belgrave -Ryan Detzel -Ryan Fowler -Ryan McLaughlin -Ryan O'Donnell -Ryan Seto -Ryan Thomas -Ryan Trauntvein -Ryan Wallner -RyanDeng -Rémy Greinhofer -s. rannou -s00318865 -Sabin Basyal -Sachin Joshi -Sagar Hani -Sainath Grandhi -Sally O'Malley -Sam Abed -Sam Alba -Sam Bailey -Sam J Sharpe -Sam Neirinck -Sam Reis -Sam Rijs -Sambuddha Basu -Sami Wagiaalla -Samuel Andaya -Samuel Dion-Girardeau -Samuel Karp -Samuel PHAN -Sankar சங்கர் -Sanket Saurav -Santhosh Manohar -sapphiredev -Satnam Singh -satoru -Satoshi Amemiya -scaleoutsean -Scott Bessler -Scott Collier -Scott Johnston -Scott Stamp -Scott Walls -sdreyesg -Sean Christopherson -Sean Cronin -Sean OMeara -Sean P. Kane -Sebastiaan van Steenis -Sebastiaan van Stijn -Senthil Kumar Selvaraj -Senthil Kumaran -SeongJae Park -Seongyeol Lim -Serge Hallyn -Sergey Alekseev -Sergey Evstifeev -Sevki Hasirci -Shane Canon -Shane da Silva -shaunol -Shawn Landden -Shawn Siefkas -Shekhar Gulati -Sheng Yang -Shengbo Song -Shih-Yuan Lee -Shijiang Wei -Shishir Mahajan -shuai-z -Shuwei Hao -Sian Lerk Lau -sidharthamani -Silas Sewell -Simei He -Simon Eskildsen -Simon Leinen -Simon Taranto -Sindhu S -Sjoerd Langkemper -Solganik Alexander -Solomon Hykes -Song Gao -Soshi Katsuta -Soulou -Spencer Brown -Spencer Smith -Sridatta Thatipamala -Sridhar Ratnakumar -Srini Brahmaroutu -srinsriv -Steeve Morin -Stefan Berger -Stefan J. Wernli -Stefan Praszalowicz -Stefan Scherer -Stefan Staudenmeyer -Stefan Weil -Stephen Crosby -Stephen Day -Stephen Rust -Steve Durrheimer -Steve Francia -Steve Koch -Steven Burgess -Steven Iveson -Steven Merrill -Steven Richards -Steven Taylor -Subhajit Ghosh -Sujith Haridasan -Suryakumar Sudar -Sven Dowideit -Swapnil Daingade -Sylvain Baubeau -Sylvain Bellemare -Sébastien -Sébastien Luttringer -Sébastien Stormacq -TAGOMORI Satoshi -tang0th -Tangi COLIN -Tatsuki Sugiura -Tatsushi Inagaki -Taylor Jones -tbonza -Ted M. Young -Tehmasp Chaudhri -Tejesh Mehta -terryding77 <550147740@qq.com> -tgic -Thatcher Peskens -theadactyl -Thell 'Bo' Fowler -Thermionix -Thijs Terlouw -Thomas Bikeev -Thomas Frössman -Thomas Gazagnaire -Thomas Grainger -Thomas Hansen -Thomas Leonard -Thomas LEVEIL -Thomas Orozco -Thomas Riccardi -Thomas Schroeter -Thomas Sjögren -Thomas Swift -Thomas Tanaka -Thomas Texier -Tianon Gravi -Tibor Vass -Tiffany Low -Tim Bosse -Tim Dettrick -Tim Düsterhus -Tim Hockin -Tim Ruffles -Tim Smith -Tim Terhorst -Tim Wang -Tim Waugh -Tim Wraight -Timothy Hobbs -tjwebb123 -tobe -Tobias Bieniek -Tobias Bradtke -Tobias Gesellchen -Tobias Klauser -Tobias Schmidt -Tobias Schwab -Todd Crane -Todd Lunter -Todd Whiteman -Toli Kuznets -Tom Barlow -Tom Denham -Tom Fotherby -Tom Howe -Tom Hulihan -Tom Maaswinkel -Tom X. 
Tobin -Tomas Tomecek -Tomasz Kopczynski -Tomasz Lipinski -Tomasz Nurkiewicz -Tommaso Visconti -Tomáš Hrčka -Tonis Tiigi -Tonny Xu -Tony Daws -Tony Miller -toogley -Torstein Husebø -tpng -tracylihui <793912329@qq.com> -Travis Cline -Travis Thieman -Trent Ogren -Trevor -Trevor Pounds -trishnaguha -Tristan Carel -Troy Denton -Tyler Brock -Tzu-Jung Lee -Tõnis Tiigi -Ulysse Carion -unknown -vagrant -Vaidas Jablonskis -Veres Lajos -vgeta -Victor Coisne -Victor Costan -Victor I. Wood -Victor Lyuboslavsky -Victor Marmol -Victor Palma -Victor Vieux -Victoria Bialas -Vijaya Kumar K -Viktor Stanchev -Viktor Vojnovski -VinayRaghavanKS -Vincent Batts -Vincent Bernat -Vincent Bernat -Vincent Demeester -Vincent Giersch -Vincent Mayers -Vincent Woo -Vinod Kulkarni -Vishal Doshi -Vishnu Kannan -Vitor Monteiro -Vivek Agarwal -Vivek Dasgupta -Vivek Goyal -Vladimir Bulyga -Vladimir Kirillov -Vladimir Rutsky -Vladimir Varankin -VladimirAus -Vojtech Vitek (V-Teq) -waitingkuo -Walter Leibbrandt -Walter Stanish -WANG Chao -Wang Xing -Ward Vandewege -WarheadsSE -Wayne Chang -Wei-Ting Kuo -weiyan -Weiyang Zhu -Wen Cheng Ma -Wendel Fleming -Wenxuan Zhao -Wenyu You <21551128@zju.edu.cn> -Wes Morgan -Will Dietz -Will Rouesnel -Will Weaver -willhf -William Delanoue -William Henry -William Hubbs -William Riancho -William Thurston -WiseTrem -wlan0 -Wolfgang Powisch -wonderflow -xamyzhao -XiaoBing Jiang -Xiaoxu Chen -xiekeyang -Xinzi Zhou -Xiuming Chen -xlgao-zju -xuzhaokui -Yahya -YAMADA Tsuyoshi -Yan Feng -Yang Bai -yangshukui -Yasunori Mahata -Yestin Sun -Yi EungJun -Yibai Zhang -Yihang Ho -Ying Li -Yohei Ueda -Yong Tang -Yongzhi Pan -yorkie -Youcef YEKHLEF -Yuan Sun -yuchangchun -yuchengxia -Yurii Rashkovskii -yuzou -Zac Dover -Zach Borboa -Zachary Jaffee -Zain Memon -Zaiste! -Zane DeGraffenried -Zefan Li -Zen Lin(Zhinan Lin) -Zhang Kun -Zhang Wei -Zhang Wentao -Zhenan Ye <21551168@zju.edu.cn> -Zhu Guihua -Zhuoyun Wei -Zilin Du -zimbatm -Ziming Dong -ZJUshuaizhou <21551191@zju.edu.cn> -zmarouf -Zoltan Tombol -zqh -Zuhayr Elahi -Zunayed Ali -Álex González -Álvaro Lázaro -Átila Camurça Alves -尹吉峰 -搏通 diff --git a/vendor/github.com/containers/storage/LICENSE b/vendor/github.com/containers/storage/LICENSE deleted file mode 100644 index 8f3fee627a..0000000000 --- a/vendor/github.com/containers/storage/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2013-2016 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/containers/storage/NOTICE b/vendor/github.com/containers/storage/NOTICE deleted file mode 100644 index 8a37c1c7bc..0000000000 --- a/vendor/github.com/containers/storage/NOTICE +++ /dev/null @@ -1,19 +0,0 @@ -Docker -Copyright 2012-2016 Docker, Inc. - -This product includes software developed at Docker, Inc. (https://www.docker.com). - -This product contains software (https://github.com/kr/pty) developed -by Keith Rarick, licensed under the MIT License. - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see https://www.bis.doc.gov - -See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. 
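The hunks that follow delete the vendored github.com/containers/storage tree (the rawfilelock, fileutils, homedir, and idtools packages, among others). This reads as the consumer side of the module's rename: the same storage code is now pulled in under the go.podman.io/storage path, so for downstream callers only the import path should change. A minimal consumer-side sketch, assuming the fileutils API shown in the deleted sources below is published unchanged under the relocated path (go.podman.io/storage is an assumption here, not something this diff states):

    package main

    import (
    	"fmt"

    	// was: "github.com/containers/storage/pkg/fileutils"
    	"go.podman.io/storage/pkg/fileutils" // assumed relocated path
    )

    func main() {
    	// Exists follows symlinks; Lexists would check the link itself.
    	if err := fileutils.Exists("/etc/containers"); err != nil {
    		fmt.Println("not present:", err)
    		return
    	}
    	fmt.Println("present")
    }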
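Among the deleted files, fileutils.go (further below) also carries the .dockerignore-style PatternMatcher. Its compile step rewrites each glob into a single anchored regexp: '*' becomes a run of non-separator characters, '**' a run that may cross separators, '?' exactly one non-separator character, and a leading '!' marks an exclusion pattern that can carve entries back out of the matched set. A short usage sketch under the same relocated-path assumption:

    package main

    import (
    	"fmt"

    	"go.podman.io/storage/pkg/fileutils" // assumed relocated path
    )

    func main() {
    	// "docs/**" matches everything under docs/, and the "!" pattern
    	// then re-includes README.md despite the broader match.
    	pm, err := fileutils.NewPatternMatcher([]string{"docs/**", "!docs/README.md"})
    	if err != nil {
    		fmt.Println(err)
    		return
    	}
    	for _, p := range []string{"docs/a/b.txt", "docs/README.md", "main.go"} {
    		excluded, err := pm.IsMatch(p)
    		if err != nil {
    			fmt.Println(err)
    			return
    		}
    		fmt.Printf("%-16s excluded=%v\n", p, excluded)
    	}
    }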
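The first deletion below, internal/rawfilelock, is a thin portability wrapper over the operating system's file-locking primitive: fcntl(2) record locks (F_SETLK/F_SETLKW) on Unix and LockFileEx on Windows, with the documented caveat that closing the handle also drops the lock. A self-contained, Unix-only sketch of the same primitive using golang.org/x/sys/unix directly; this is illustrative, not the deleted package's API:

    //go:build !windows

    package main

    import (
    	"fmt"
    	"os"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	f, err := os.OpenFile("/tmp/example.lock", os.O_CREATE|os.O_RDWR, 0o644)
    	if err != nil {
    		fmt.Println(err)
    		return
    	}
    	// Closing the descriptor releases the lock -- the exact caveat the
    	// deleted OpenLock documentation warns about.
    	defer f.Close()

    	lk := unix.Flock_t{
    		Type:   unix.F_WRLCK,         // exclusive write lock
    		Whence: int16(unix.SEEK_SET), // Start/Len of 0,0 covers the whole file
    	}
    	// F_SETLKW blocks until the lock is granted; F_SETLK would fail
    	// immediately instead (the nonblocking path in the deleted code).
    	if err := unix.FcntlFlock(f.Fd(), unix.F_SETLKW, &lk); err != nil {
    		fmt.Println("lock failed:", err)
    		return
    	}
    	fmt.Println("write lock held; released on close")
    }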
diff --git a/vendor/github.com/containers/storage/internal/rawfilelock/rawfilelock.go b/vendor/github.com/containers/storage/internal/rawfilelock/rawfilelock.go deleted file mode 100644 index 4f340ae3c1..0000000000 --- a/vendor/github.com/containers/storage/internal/rawfilelock/rawfilelock.go +++ /dev/null @@ -1,64 +0,0 @@ -package rawfilelock - -import ( - "os" -) - -type LockType byte - -const ( - ReadLock LockType = iota - WriteLock -) - -type FileHandle = fileHandle - -// OpenLock opens a file for locking -// WARNING: This is the underlying file locking primitive of the OS; -// because closing FileHandle releases the lock, it is not suitable for use -// if there is any chance of two concurrent goroutines attempting to use the same lock. -// Most users should use the higher-level operations from internal/staging_lockfile or pkg/lockfile. -func OpenLock(path string, readOnly bool) (FileHandle, error) { - flags := os.O_CREATE - if readOnly { - flags |= os.O_RDONLY - } else { - flags |= os.O_RDWR - } - - fd, err := openHandle(path, flags) - if err == nil { - return fd, nil - } - - return fd, &os.PathError{Op: "open", Path: path, Err: err} -} - -// TryLockFile attempts to lock a file handle -func TryLockFile(fd FileHandle, lockType LockType) error { - return lockHandle(fd, lockType, true) -} - -// LockFile locks a file handle -func LockFile(fd FileHandle, lockType LockType) error { - return lockHandle(fd, lockType, false) -} - -// UnlockAndCloseHandle unlocks and closes a file handle -func UnlockAndCloseHandle(fd FileHandle) { - unlockAndCloseHandle(fd) -} - -// CloseHandle closes a file handle without unlocking -// -// WARNING: This is a last-resort function for error handling only! -// On Unix systems, closing a file descriptor automatically releases any locks, -// so "closing without unlocking" is impossible. This function will release -// the lock as a side effect of closing the file. -// -// This function should only be used in error paths where the lock state -// is already corrupted or when giving up on lock management entirely. -// Normal code should use UnlockAndCloseHandle instead. 
-func CloseHandle(fd FileHandle) { - closeHandle(fd) -} diff --git a/vendor/github.com/containers/storage/internal/rawfilelock/rawfilelock_unix.go b/vendor/github.com/containers/storage/internal/rawfilelock/rawfilelock_unix.go deleted file mode 100644 index 2685540769..0000000000 --- a/vendor/github.com/containers/storage/internal/rawfilelock/rawfilelock_unix.go +++ /dev/null @@ -1,49 +0,0 @@ -//go:build !windows - -package rawfilelock - -import ( - "time" - - "golang.org/x/sys/unix" -) - -type fileHandle uintptr - -func openHandle(path string, mode int) (fileHandle, error) { - mode |= unix.O_CLOEXEC - fd, err := unix.Open(path, mode, 0o644) - return fileHandle(fd), err -} - -func lockHandle(fd fileHandle, lType LockType, nonblocking bool) error { - fType := unix.F_RDLCK - if lType != ReadLock { - fType = unix.F_WRLCK - } - lk := unix.Flock_t{ - Type: int16(fType), - Whence: int16(unix.SEEK_SET), - Start: 0, - Len: 0, - } - cmd := unix.F_SETLKW - if nonblocking { - cmd = unix.F_SETLK - } - for { - err := unix.FcntlFlock(uintptr(fd), cmd, &lk) - if err == nil || nonblocking { - return err - } - time.Sleep(10 * time.Millisecond) - } -} - -func unlockAndCloseHandle(fd fileHandle) { - unix.Close(int(fd)) -} - -func closeHandle(fd fileHandle) { - unix.Close(int(fd)) -} diff --git a/vendor/github.com/containers/storage/internal/rawfilelock/rawfilelock_windows.go b/vendor/github.com/containers/storage/internal/rawfilelock/rawfilelock_windows.go deleted file mode 100644 index 9c0d692f8a..0000000000 --- a/vendor/github.com/containers/storage/internal/rawfilelock/rawfilelock_windows.go +++ /dev/null @@ -1,48 +0,0 @@ -//go:build windows - -package rawfilelock - -import ( - "golang.org/x/sys/windows" -) - -const ( - reserved = 0 - allBytes = ^uint32(0) -) - -type fileHandle windows.Handle - -func openHandle(path string, mode int) (fileHandle, error) { - mode |= windows.O_CLOEXEC - fd, err := windows.Open(path, mode, windows.S_IWRITE) - return fileHandle(fd), err -} - -func lockHandle(fd fileHandle, lType LockType, nonblocking bool) error { - flags := 0 - if lType != ReadLock { - flags = windows.LOCKFILE_EXCLUSIVE_LOCK - } - if nonblocking { - flags |= windows.LOCKFILE_FAIL_IMMEDIATELY - } - ol := new(windows.Overlapped) - if err := windows.LockFileEx(windows.Handle(fd), uint32(flags), reserved, allBytes, allBytes, ol); err != nil { - if nonblocking { - return err - } - panic(err) - } - return nil -} - -func unlockAndCloseHandle(fd fileHandle) { - ol := new(windows.Overlapped) - windows.UnlockFileEx(windows.Handle(fd), reserved, allBytes, allBytes, ol) - closeHandle(fd) -} - -func closeHandle(fd fileHandle) { - windows.Close(windows.Handle(fd)) -} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/exists_freebsd.go b/vendor/github.com/containers/storage/pkg/fileutils/exists_freebsd.go deleted file mode 100644 index eeecc9f75e..0000000000 --- a/vendor/github.com/containers/storage/pkg/fileutils/exists_freebsd.go +++ /dev/null @@ -1,38 +0,0 @@ -package fileutils - -import ( - "errors" - "os" - "syscall" - - "golang.org/x/sys/unix" -) - -// Exists checks whether a file or directory exists at the given path. -// If the path is a symlink, the symlink is followed. -func Exists(path string) error { - // It uses unix.Faccessat which is a faster operation compared to os.Stat for - // simply checking the existence of a file. 
- err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, 0) - if err != nil { - return &os.PathError{Op: "faccessat", Path: path, Err: err} - } - return nil -} - -// Lexists checks whether a file or directory exists at the given path. -// If the path is a symlink, the symlink itself is checked. -func Lexists(path string) error { - // FreeBSD before 15.0 does not support the AT_SYMLINK_NOFOLLOW flag for - // faccessat. In this case, the call to faccessat will return EINVAL and - // we fall back to using Lstat. - err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_SYMLINK_NOFOLLOW) - if err != nil { - if errors.Is(err, syscall.EINVAL) { - _, err = os.Lstat(path) - return err - } - return &os.PathError{Op: "faccessat", Path: path, Err: err} - } - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go b/vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go deleted file mode 100644 index 04cfafcd5c..0000000000 --- a/vendor/github.com/containers/storage/pkg/fileutils/exists_unix.go +++ /dev/null @@ -1,33 +0,0 @@ -//go:build !windows && !freebsd - -package fileutils - -import ( - "os" - - "golang.org/x/sys/unix" -) - -// Exists checks whether a file or directory exists at the given path. -// If the path is a symlink, the symlink is followed. -func Exists(path string) error { - // It uses unix.Faccessat which is a faster operation compared to os.Stat for - // simply checking the existence of a file. - err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_EACCESS) - if err != nil { - return &os.PathError{Op: "faccessat", Path: path, Err: err} - } - return nil -} - -// Lexists checks whether a file or directory exists at the given path. -// If the path is a symlink, the symlink itself is checked. -func Lexists(path string) error { - // It uses unix.Faccessat which is a faster operation compared to os.Stat for - // simply checking the existence of a file. - err := unix.Faccessat(unix.AT_FDCWD, path, unix.F_OK, unix.AT_SYMLINK_NOFOLLOW|unix.AT_EACCESS) - if err != nil { - return &os.PathError{Op: "faccessat", Path: path, Err: err} - } - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/exists_windows.go b/vendor/github.com/containers/storage/pkg/fileutils/exists_windows.go deleted file mode 100644 index 355cf04647..0000000000 --- a/vendor/github.com/containers/storage/pkg/fileutils/exists_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -package fileutils - -import ( - "os" -) - -// Exists checks whether a file or directory exists at the given path. 
-func Exists(path string) error { - _, err := os.Stat(path) - return err -} - -// Lexists checks whether a file or directory exists at the given path, without -// resolving symlinks -func Lexists(path string) error { - _, err := os.Lstat(path) - return err -} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go deleted file mode 100644 index 85ce2d5260..0000000000 --- a/vendor/github.com/containers/storage/pkg/fileutils/fileutils.go +++ /dev/null @@ -1,371 +0,0 @@ -package fileutils - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "regexp" - "strings" - "text/scanner" - - "github.com/sirupsen/logrus" -) - -// PatternMatcher allows checking paths against a list of patterns -type PatternMatcher struct { - patterns []*Pattern - exclusions bool -} - -// NewPatternMatcher creates a new matcher object for specific patterns that can -// be used later to match paths against the patterns -func NewPatternMatcher(patterns []string) (*PatternMatcher, error) { - pm := &PatternMatcher{ - patterns: make([]*Pattern, 0, len(patterns)), - } - for _, p := range patterns { - // Eliminate leading and trailing whitespace. - p = strings.TrimSpace(p) - if p == "" { - continue - } - p = filepath.Clean(p) - newp := &Pattern{} - if p[0] == '!' { - if len(p) == 1 { - return nil, errors.New("illegal exclusion pattern: \"!\"") - } - newp.exclusion = true - p = strings.TrimPrefix(filepath.Clean(p[1:]), "/") - pm.exclusions = true - } - // Do some syntax checking on the pattern. - // filepath's Match() has some really weird rules that are inconsistent - // so instead of trying to dup their logic, just call Match() for its - // error state and if there is an error in the pattern return it. - // If this becomes an issue we can remove this since it's really only - // needed in the error (syntax) case - which isn't really critical. - if _, err := filepath.Match(p, "."); err != nil { - return nil, err - } - newp.cleanedPattern = p - newp.dirs = strings.Split(p, string(os.PathSeparator)) - pm.patterns = append(pm.patterns, newp) - } - return pm, nil -} - -// Deprecated: Please use the `MatchesResult` method instead. -// Matches matches path against all the patterns. Matches is not safe to be -// called concurrently -func (pm *PatternMatcher) Matches(file string) (bool, error) { - matched := false - file = filepath.FromSlash(file) - - for _, pattern := range pm.patterns { - negative := false - - if pattern.exclusion { - negative = true - } - - match, err := pattern.match(file) - if err != nil { - return false, err - } - - if match { - matched = !negative - } - } - - if matched { - logrus.Debugf("Skipping excluded path: %s", file) - } - - return matched, nil -} - -type MatchResult struct { - isMatched bool - matches, excludes uint -} - -// IsMatched returns true if the overall result is matched -func (m *MatchResult) IsMatched() bool { - return m.isMatched -} - -// Matches returns the number of matches of a MatchResult -func (m *MatchResult) Matches() uint { - return m.matches -} - -// Excludes returns the number of excludes of a MatchResult -func (m *MatchResult) Excludes() uint { - return m.excludes -} - -// MatchesResult verifies the provided filepath against all patterns. -// It returns the `*MatchResult` result for the patterns on success, otherwise -// an error. This method is not safe to be called concurrently. 
-func (pm *PatternMatcher) MatchesResult(file string) (res *MatchResult, err error) { - file = filepath.FromSlash(file) - res = &MatchResult{false, 0, 0} - - for _, pattern := range pm.patterns { - negative := false - - if pattern.exclusion { - negative = true - } - - match, err := pattern.match(file) - if err != nil { - return nil, err - } - - if match { - res.isMatched = !negative - if negative { - res.excludes++ - } else { - res.matches++ - } - } - } - - if res.matches > 0 { - logrus.Debugf("Skipping excluded path: %s", file) - } - - return res, nil -} - -// IsMatch verifies the provided filepath against all patterns and returns true -// if it matches. A match is valid if the last match is a positive one. -// It returns an error on failure and is not safe to be called concurrently. -func (pm *PatternMatcher) IsMatch(file string) (matched bool, err error) { - res, err := pm.MatchesResult(file) - if err != nil { - return false, err - } - return res.isMatched, nil -} - -// Exclusions returns true if any of the patterns define exclusions -func (pm *PatternMatcher) Exclusions() bool { - return pm.exclusions -} - -// Patterns returns array of active patterns -func (pm *PatternMatcher) Patterns() []*Pattern { - return pm.patterns -} - -// Pattern defines a single regexp used to filter file paths. -type Pattern struct { - cleanedPattern string - dirs []string - regexp *regexp.Regexp - exclusion bool -} - -func (p *Pattern) String() string { - return p.cleanedPattern -} - -// Exclusion returns true if this pattern defines exclusion -func (p *Pattern) Exclusion() bool { - return p.exclusion -} - -func (p *Pattern) match(path string) (bool, error) { - if p.regexp == nil { - if err := p.compile(); err != nil { - return false, filepath.ErrBadPattern - } - } - - b := p.regexp.MatchString(path) - - return b, nil -} - -func (p *Pattern) compile() error { - regStr := "^" - pattern := p.cleanedPattern - // Go through the pattern and convert it to a regexp. - // We use a scanner so we can support utf-8 chars. - var scan scanner.Scanner - scan.Init(strings.NewReader(pattern)) - - sl := string(os.PathSeparator) - escSL := sl - const bs = `\` - if sl == bs { - escSL += bs - } - - for scan.Peek() != scanner.EOF { - ch := scan.Next() - - if ch == '*' { - if scan.Peek() == '*' { - // is some flavor of "**" - scan.Next() - - // Treat **/ as ** so eat the "/" - if string(scan.Peek()) == sl { - scan.Next() - } - - if scan.Peek() == scanner.EOF { - // is "**EOF" - to align with .gitignore just accept all - regStr += ".*" - } else { - // is "**" - // Note that this allows for any # of /'s (even 0) because - // the .* will eat everything, even /'s - regStr += "(.*" + escSL + ")?" - } - } else { - // is "*" so map it to anything but "/" - regStr += "[^" + escSL + "]*" - } - } else if ch == '?' { - // "?" is any char except "/" - regStr += "[^" + escSL + "]" - } else if ch == '.' || ch == '$' { - // Escape some regexp special chars that have no meaning - // in golang's filepath.Match - regStr += bs + string(ch) - } else if ch == '\\' { - // escape next char. 
- if sl == bs { - // On windows map "\" to "\\", meaning an escaped backslash, - // and then just continue because filepath.Match on - // Windows doesn't allow escaping at all - regStr += escSL - continue - } - if scan.Peek() != scanner.EOF { - regStr += bs + string(scan.Next()) - } else { - return filepath.ErrBadPattern - } - } else { - regStr += string(ch) - } - } - - regStr += "(" + escSL + ".*)?$" - - re, err := regexp.Compile(regStr) - if err != nil { - return err - } - - p.regexp = re - return nil -} - -// Matches returns true if file matches any of the patterns -// and isn't excluded by any of the subsequent patterns. -func Matches(file string, patterns []string) (bool, error) { - pm, err := NewPatternMatcher(patterns) - if err != nil { - return false, err - } - file = filepath.Clean(file) - - if file == "." { - // Don't let them exclude everything, kind of silly. - return false, nil - } - - return pm.IsMatch(file) -} - -// CopyFile copies from src to dst until either EOF is reached -// on src or an error occurs. It verifies src exists and removes -// the dst if it exists. -func CopyFile(src, dst string) (int64, error) { - cleanSrc := filepath.Clean(src) - cleanDst := filepath.Clean(dst) - if cleanSrc == cleanDst { - return 0, nil - } - sf, err := os.Open(cleanSrc) - if err != nil { - return 0, err - } - defer sf.Close() - if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) { - return 0, err - } - df, err := os.Create(cleanDst) - if err != nil { - return 0, err - } - defer df.Close() - return io.Copy(df, sf) -} - -// ReadSymlinkedDirectory returns the target directory of a symlink. -// The target of the symbolic link may not be a file. -func ReadSymlinkedDirectory(path string) (string, error) { - var realPath string - var err error - if realPath, err = filepath.Abs(path); err != nil { - return "", fmt.Errorf("unable to get absolute path for %s: %w", path, err) - } - if realPath, err = filepath.EvalSymlinks(realPath); err != nil { - return "", fmt.Errorf("failed to canonicalise path for %s: %w", path, err) - } - realPathInfo, err := os.Stat(realPath) - if err != nil { - return "", fmt.Errorf("failed to stat target '%s' of '%s': %w", realPath, path, err) - } - if !realPathInfo.Mode().IsDir() { - return "", fmt.Errorf("canonical path points to a file '%s'", realPath) - } - return realPath, nil -} - -// ReadSymlinkedPath returns the target directory of a symlink. -// The target of the symbolic link can be a file and a directory. -func ReadSymlinkedPath(path string) (realPath string, err error) { - if realPath, err = filepath.Abs(path); err != nil { - return "", fmt.Errorf("unable to get absolute path for %q: %w", path, err) - } - if realPath, err = filepath.EvalSymlinks(realPath); err != nil { - return "", fmt.Errorf("failed to canonicalise path for %q: %w", path, err) - } - if err := Exists(realPath); err != nil { - return "", fmt.Errorf("failed to stat target %q of %q: %w", realPath, path, err) - } - return realPath, nil -} - -// CreateIfNotExists creates a file or a directory only if it does not already exist. 
-func CreateIfNotExists(path string, isDir bool) error { - if err := Exists(path); err != nil { - if os.IsNotExist(err) { - if isDir { - return os.MkdirAll(path, 0o755) - } - if err := os.MkdirAll(filepath.Dir(path), 0o755); err != nil { - return err - } - f, err := os.OpenFile(path, os.O_CREATE, 0o755) - if err != nil { - return err - } - f.Close() - } - } - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go deleted file mode 100644 index ccd648fac3..0000000000 --- a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_darwin.go +++ /dev/null @@ -1,27 +0,0 @@ -package fileutils - -import ( - "os" - "os/exec" - "strconv" - "strings" -) - -// GetTotalUsedFds returns the number of used File Descriptors by -// executing `lsof -p PID` -func GetTotalUsedFds() int { - pid := os.Getpid() - - cmd := exec.Command("lsof", "-p", strconv.Itoa(pid)) - - output, err := cmd.CombinedOutput() - if err != nil { - return -1 - } - - outputStr := strings.TrimSpace(string(output)) - - fds := strings.Split(outputStr, "\n") - - return len(fds) - 1 -} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go deleted file mode 100644 index 0f2cb7ab93..0000000000 --- a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_solaris.go +++ /dev/null @@ -1,7 +0,0 @@ -package fileutils - -// GetTotalUsedFds Returns the number of used File Descriptors. -// On Solaris these limits are per process and not systemwide -func GetTotalUsedFds() int { - return -1 -} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go deleted file mode 100644 index 3cb250c5a3..0000000000 --- a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_unix.go +++ /dev/null @@ -1,21 +0,0 @@ -//go:build linux || freebsd - -package fileutils - -import ( - "fmt" - "os" - - "github.com/sirupsen/logrus" -) - -// GetTotalUsedFds Returns the number of used File Descriptors by -// reading it via /proc filesystem. -func GetTotalUsedFds() int { - if fds, err := os.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { - logrus.Errorf("%v", err) - } else { - return len(fds) - } - return -1 -} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_windows.go b/vendor/github.com/containers/storage/pkg/fileutils/fileutils_windows.go deleted file mode 100644 index 5ec21cace5..0000000000 --- a/vendor/github.com/containers/storage/pkg/fileutils/fileutils_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -package fileutils - -// GetTotalUsedFds Returns the number of used File Descriptors. Not supported -// on Windows. -func GetTotalUsedFds() int { - return -1 -} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/reflink_linux.go b/vendor/github.com/containers/storage/pkg/fileutils/reflink_linux.go deleted file mode 100644 index 9f5c6c90bb..0000000000 --- a/vendor/github.com/containers/storage/pkg/fileutils/reflink_linux.go +++ /dev/null @@ -1,20 +0,0 @@ -package fileutils - -import ( - "io" - "os" - - "golang.org/x/sys/unix" -) - -// ReflinkOrCopy attempts to reflink the source to the destination fd. -// If reflinking fails or is unsupported, it falls back to io.Copy(). 
-func ReflinkOrCopy(src, dst *os.File) error { - err := unix.IoctlFileClone(int(dst.Fd()), int(src.Fd())) - if err == nil { - return nil - } - - _, err = io.Copy(dst, src) - return err -} diff --git a/vendor/github.com/containers/storage/pkg/fileutils/reflink_unsupported.go b/vendor/github.com/containers/storage/pkg/fileutils/reflink_unsupported.go deleted file mode 100644 index c0a30e670c..0000000000 --- a/vendor/github.com/containers/storage/pkg/fileutils/reflink_unsupported.go +++ /dev/null @@ -1,15 +0,0 @@ -//go:build !linux - -package fileutils - -import ( - "io" - "os" -) - -// ReflinkOrCopy attempts to reflink the source to the destination fd. -// If reflinking fails or is unsupported, it falls back to io.Copy(). -func ReflinkOrCopy(src, dst *os.File) error { - _, err := io.Copy(dst, src) - return err -} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir.go b/vendor/github.com/containers/storage/pkg/homedir/homedir.go deleted file mode 100644 index 7eb63b67a4..0000000000 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir.go +++ /dev/null @@ -1,37 +0,0 @@ -package homedir - -import ( - "errors" - "os" - "path/filepath" -) - -// GetDataHome returns XDG_DATA_HOME. -// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetDataHome() (string, error) { - if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" { - return xdgDataHome, nil - } - home := Get() - if home == "" { - return "", errors.New("could not get either XDG_DATA_HOME or HOME") - } - return filepath.Join(home, ".local", "share"), nil -} - -// GetCacheHome returns XDG_CACHE_HOME. -// GetCacheHome returns $HOME/.cache and nil error if XDG_CACHE_HOME is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetCacheHome() (string, error) { - if xdgCacheHome := os.Getenv("XDG_CACHE_HOME"); xdgCacheHome != "" { - return xdgCacheHome, nil - } - home := Get() - if home == "" { - return "", errors.New("could not get either XDG_CACHE_HOME or HOME") - } - return filepath.Join(home, ".cache"), nil -} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go deleted file mode 100644 index f351b48bb4..0000000000 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_unix.go +++ /dev/null @@ -1,182 +0,0 @@ -//go:build !windows - -package homedir - -// Copyright 2013-2018 Docker, Inc. -// NOTE: this package has originally been copied from github.com/docker/docker. - -import ( - "fmt" - "os" - "path/filepath" - "strconv" - "strings" - "sync" - "syscall" - - "github.com/containers/storage/pkg/unshare" - "github.com/sirupsen/logrus" -) - -// Key returns the env var name for the user's home dir based on -// the platform being run on -func Key() string { - return "HOME" -} - -// Get returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. -// -// If linking statically with cgo enabled against glibc, ensure the -// osusergo build tag is used. -// -// If needing to do nss lookups, do not disable cgo or set osusergo. 
-func Get() string { - homedir, _ := unshare.HomeDir() - return homedir -} - -// GetShortcutString returns the string that is shortcut to user's home directory -// in the native shell of the platform running on. -func GetShortcutString() string { - return "~" -} - -// StickRuntimeDirContents sets the sticky bit on files that are under -// XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system. -// -// StickyRuntimeDir returns slice of sticked files. -// StickyRuntimeDir returns nil error if XDG_RUNTIME_DIR is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func StickRuntimeDirContents(files []string) ([]string, error) { - runtimeDir, err := GetRuntimeDir() - if err != nil { - // ignore error if runtimeDir is empty - return nil, nil //nolint: nilerr - } - runtimeDir, err = filepath.Abs(runtimeDir) - if err != nil { - return nil, err - } - var sticked []string - for _, f := range files { - f, err = filepath.Abs(f) - if err != nil { - return sticked, err - } - if strings.HasPrefix(f, runtimeDir+"/") { - if err = stick(f); err != nil { - return sticked, err - } - sticked = append(sticked, f) - } - } - return sticked, nil -} - -func stick(f string) error { - st, err := os.Stat(f) - if err != nil { - return err - } - m := st.Mode() - m |= os.ModeSticky - return os.Chmod(f, m) -} - -var ( - rootlessConfigHomeDirError error - rootlessConfigHomeDirOnce sync.Once - rootlessConfigHomeDir string - rootlessRuntimeDirOnce sync.Once - rootlessRuntimeDir string -) - -// isWriteableOnlyByOwner checks that the specified permission mask allows write -// access only to the owner. -func isWriteableOnlyByOwner(perm os.FileMode) bool { - return (perm & 0o722) == 0o700 -} - -// GetConfigHome returns XDG_CONFIG_HOME. -// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. -// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetConfigHome() (string, error) { - rootlessConfigHomeDirOnce.Do(func() { - cfgHomeDir := os.Getenv("XDG_CONFIG_HOME") - if cfgHomeDir == "" { - home := Get() - resolvedHome, err := filepath.EvalSymlinks(home) - if err != nil { - rootlessConfigHomeDirError = fmt.Errorf("cannot resolve %s: %w", home, err) - return - } - tmpDir := filepath.Join(resolvedHome, ".config") - _ = os.MkdirAll(tmpDir, 0o700) - st, err := os.Stat(tmpDir) - if err != nil { - rootlessConfigHomeDirError = err - return - } else if int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() { - cfgHomeDir = tmpDir - } else { - rootlessConfigHomeDirError = fmt.Errorf("path %q exists and it is not owned by the current user", tmpDir) - return - } - } - rootlessConfigHomeDir = cfgHomeDir - }) - - return rootlessConfigHomeDir, rootlessConfigHomeDirError -} - -// GetRuntimeDir returns a directory suitable to store runtime files. -// The function will try to use the XDG_RUNTIME_DIR env variable if it is set. -// XDG_RUNTIME_DIR is typically configured via pam_systemd. -// If XDG_RUNTIME_DIR is not set, GetRuntimeDir will try to find a suitable -// directory for the current user. 
-// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetRuntimeDir() (string, error) { - var rootlessRuntimeDirError error - - rootlessRuntimeDirOnce.Do(func() { - runtimeDir := os.Getenv("XDG_RUNTIME_DIR") - - if runtimeDir != "" { - rootlessRuntimeDir, rootlessRuntimeDirError = filepath.EvalSymlinks(runtimeDir) - return - } - - uid := strconv.Itoa(unshare.GetRootlessUID()) - if runtimeDir == "" { - tmpDir := filepath.Join("/run", "user", uid) - if err := os.MkdirAll(tmpDir, 0o700); err != nil { - logrus.Debug(err) - } - st, err := os.Lstat(tmpDir) - if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && isWriteableOnlyByOwner(st.Mode().Perm()) { - runtimeDir = tmpDir - } - } - if runtimeDir == "" { - tmpDir := filepath.Join(os.TempDir(), fmt.Sprintf("storage-run-%s", uid)) - if err := os.MkdirAll(tmpDir, 0o700); err != nil { - logrus.Debug(err) - } - st, err := os.Lstat(tmpDir) - if err == nil && int(st.Sys().(*syscall.Stat_t).Uid) == os.Geteuid() && isWriteableOnlyByOwner(st.Mode().Perm()) { - runtimeDir = tmpDir - } else { - rootlessRuntimeDirError = fmt.Errorf("path %q exists and it is not writeable only by the current user", tmpDir) - return - } - } - rootlessRuntimeDir = runtimeDir - }) - - return rootlessRuntimeDir, rootlessRuntimeDirError -} diff --git a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go b/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go deleted file mode 100644 index a76610f90e..0000000000 --- a/vendor/github.com/containers/storage/pkg/homedir/homedir_windows.go +++ /dev/null @@ -1,61 +0,0 @@ -package homedir - -// Copyright 2013-2018 Docker, Inc. -// NOTE: this package has originally been copied from github.com/docker/docker. - -import ( - "os" - "path/filepath" -) - -// Key returns the env var name for the user's home dir based on -// the platform being run on -func Key() string { - return "USERPROFILE" -} - -// Get returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. -func Get() string { - home := os.Getenv(Key()) - if home != "" { - return home - } - home, _ = os.UserHomeDir() - return home -} - -// GetConfigHome returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. -func GetConfigHome() (string, error) { - return filepath.Join(Get(), ".config"), nil -} - -// GetShortcutString returns the string that is shortcut to user's home directory -// in the native shell of the platform running on. -func GetShortcutString() string { - return "%USERPROFILE%" // be careful while using in format functions -} - -// StickRuntimeDirContents is a no-op on Windows -func StickRuntimeDirContents(files []string) ([]string, error) { - return nil, nil -} - -// GetRuntimeDir returns a directory suitable to store runtime files. -// The function will try to use the XDG_RUNTIME_DIR env variable if it is set. -// XDG_RUNTIME_DIR is typically configured via pam_systemd. -// If XDG_RUNTIME_DIR is not set, GetRuntimeDir will try to find a suitable -// directory for the current user. 
-// -// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html -func GetRuntimeDir() (string, error) { - data, err := GetDataHome() - if err != nil { - return "", err - } - runtimeDir := filepath.Join(data, "containers", "storage") - return runtimeDir, nil -} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools.go b/vendor/github.com/containers/storage/pkg/idtools/idtools.go deleted file mode 100644 index 13277f090e..0000000000 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools.go +++ /dev/null @@ -1,620 +0,0 @@ -package idtools - -import ( - "bufio" - "errors" - "fmt" - "io/fs" - "os" - "os/user" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "syscall" - - "github.com/containers/storage/pkg/system" - "github.com/sirupsen/logrus" -) - -// IDMap contains a single entry for user namespace range remapping. An array -// of IDMap entries represents the structure that will be provided to the Linux -// kernel for creating a user namespace. -type IDMap struct { - ContainerID int `json:"container_id"` - HostID int `json:"host_id"` - Size int `json:"size"` -} - -type subIDRange struct { - Start int - Length int -} - -type ranges []subIDRange - -func (e ranges) Len() int { return len(e) } -func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] } -func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start } - -const ( - subuidFileName string = "/etc/subuid" - subgidFileName string = "/etc/subgid" - ContainersOverrideXattr = "user.containers.override_stat" -) - -// MkdirAllAs creates a directory (include any along the path) and then modifies -// ownership to the requested uid/gid. If the directory already exists, this -// function will still change ownership to the requested uid/gid pair. -// Deprecated: Use MkdirAllAndChown -func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { - return mkdirAs(path, mode, ownerUID, ownerGID, true, true) -} - -// MkdirAs creates a directory and then modifies ownership to the requested uid/gid. -// If the directory already exists, this function still changes ownership -// Deprecated: Use MkdirAndChown with a IDPair -func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error { - return mkdirAs(path, mode, ownerUID, ownerGID, false, true) -} - -// MkdirAllAndChown creates a directory (include any along the path) and then modifies -// ownership to the requested uid/gid. If the directory already exists, this -// function will still change ownership to the requested uid/gid pair. -func MkdirAllAndChown(path string, mode os.FileMode, ids IDPair) error { - return mkdirAs(path, mode, ids.UID, ids.GID, true, true) -} - -// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid. -// If the directory already exists, this function still changes ownership -func MkdirAndChown(path string, mode os.FileMode, ids IDPair) error { - return mkdirAs(path, mode, ids.UID, ids.GID, false, true) -} - -// MkdirAllAndChownNew creates a directory (include any along the path) and then modifies -// ownership ONLY of newly created directories to the requested uid/gid. If the -// directories along the path exist, no change of ownership will be performed -func MkdirAllAndChownNew(path string, mode os.FileMode, ids IDPair) error { - return mkdirAs(path, mode, ids.UID, ids.GID, true, false) -} - -// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps. 
-// If the maps are empty, then the root uid/gid will default to "real" 0/0 -func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) { - var uid, gid int - var err error - if len(uidMap) == 1 && uidMap[0].Size == 1 { - uid = uidMap[0].HostID - } else { - uid, err = RawToHost(0, uidMap) - if err != nil { - return -1, -1, err - } - } - if len(gidMap) == 1 && gidMap[0].Size == 1 { - gid = gidMap[0].HostID - } else { - gid, err = RawToHost(0, gidMap) - if err != nil { - return -1, -1, err - } - } - return uid, gid, nil -} - -// RawToContainer takes an id mapping, and uses it to translate a host ID to -// the remapped ID. If no map is provided, then the translation assumes a -// 1-to-1 mapping and returns the passed in id. -// -// If you wish to map a (uid,gid) combination you should use the corresponding -// IDMappings methods, which ensure that you are mapping the correct ID against -// the correct mapping. -func RawToContainer(hostID int, idMap []IDMap) (int, error) { - if idMap == nil { - return hostID, nil - } - for _, m := range idMap { - if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) { - contID := m.ContainerID + (hostID - m.HostID) - return contID, nil - } - } - return -1, fmt.Errorf("host ID %d cannot be mapped to a container ID", hostID) -} - -// RawToHost takes an id mapping and a remapped ID, and translates the ID to -// the mapped host ID. If no map is provided, then the translation assumes a -// 1-to-1 mapping and returns the passed in id. -// -// If you wish to map a (uid,gid) combination you should use the corresponding -// IDMappings methods, which ensure that you are mapping the correct ID against -// the correct mapping. -func RawToHost(contID int, idMap []IDMap) (int, error) { - if idMap == nil { - return contID, nil - } - for _, m := range idMap { - if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) { - hostID := m.HostID + (contID - m.ContainerID) - return hostID, nil - } - } - return -1, fmt.Errorf("container ID %d cannot be mapped to a host ID", contID) -} - -// IDPair is a UID and GID pair -type IDPair struct { - UID int - GID int -} - -// IDMappings contains a mappings of UIDs and GIDs -type IDMappings struct { - uids []IDMap - gids []IDMap -} - -// NewIDMappings takes a requested user and group name and -// using the data from /etc/sub{uid,gid} ranges, creates the -// proper uid and gid remapping ranges for that user/group pair -func NewIDMappings(username, groupname string) (*IDMappings, error) { - subuidRanges, err := readSubuid(username) - if err != nil { - return nil, err - } - subgidRanges, err := readSubgid(groupname) - if err != nil { - return nil, err - } - if len(subuidRanges) == 0 { - return nil, fmt.Errorf("no subuid ranges found for user %q in %s", username, subuidFileName) - } - if len(subgidRanges) == 0 { - return nil, fmt.Errorf("no subgid ranges found for group %q in %s", groupname, subgidFileName) - } - - return &IDMappings{ - uids: createIDMap(subuidRanges), - gids: createIDMap(subgidRanges), - }, nil -} - -// NewIDMappingsFromMaps creates a new mapping from two slices -// Deprecated: this is a temporary shim while transitioning to IDMapping -func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IDMappings { - return &IDMappings{uids: uids, gids: gids} -} - -// RootPair returns a uid and gid pair for the root user. The error is ignored -// because a root user always exists, and the defaults are correct when the uid -// and gid maps are empty. 
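Note: RawToHost and RawToContainer above are plain interval arithmetic: an ID inside [ContainerID, ContainerID+Size) is shifted by a constant offset into the host range, and vice versa. A worked sketch with the two-entry mapping typical of rootless setups (the concrete IDs are illustrative):

package main

import "fmt"

// idMap mirrors the IDMap triple used above.
type idMap struct{ containerID, hostID, size int }

// rawToHost finds the range containing contID and applies the offset,
// matching the deleted RawToHost's logic.
func rawToHost(contID int, m []idMap) (int, error) {
	for _, e := range m {
		if contID >= e.containerID && contID < e.containerID+e.size {
			return e.hostID + (contID - e.containerID), nil
		}
	}
	return -1, fmt.Errorf("container ID %d cannot be mapped to a host ID", contID)
}

func main() {
	// Container root is host uid 1000; uids 1..65536 come from a subuid range.
	m := []idMap{{0, 1000, 1}, {1, 100000, 65536}}
	for _, id := range []int{0, 1, 1001} {
		host, err := rawToHost(id, m)
		if err != nil {
			fmt.Println(err)
			continue
		}
		fmt.Printf("container %d -> host %d\n", id, host) // 0->1000, 1->100000, 1001->101000
	}
}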
-func (i *IDMappings) RootPair() IDPair { - uid, gid, _ := GetRootUIDGID(i.uids, i.gids) - return IDPair{UID: uid, GID: gid} -} - -// ToHost returns the host UID and GID for the container uid, gid. -func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) { - var err error - var target IDPair - - target.UID, err = RawToHost(pair.UID, i.uids) - if err != nil { - return target, err - } - - target.GID, err = RawToHost(pair.GID, i.gids) - return target, err -} - -var ( - overflowUIDOnce sync.Once - overflowGIDOnce sync.Once - overflowUID int - overflowGID int -) - -// getOverflowUID returns the UID mapped to the overflow user -func getOverflowUID() int { - overflowUIDOnce.Do(func() { - // 65534 is the value on older kernels where /proc/sys/kernel/overflowuid is not present - overflowUID = 65534 - if content, err := os.ReadFile("/proc/sys/kernel/overflowuid"); err == nil { - if tmp, err := strconv.Atoi(string(content)); err == nil { - overflowUID = tmp - } - } - }) - return overflowUID -} - -// getOverflowGID returns the GID mapped to the overflow user -func getOverflowGID() int { - overflowGIDOnce.Do(func() { - // 65534 is the value on older kernels where /proc/sys/kernel/overflowgid is not present - overflowGID = 65534 - if content, err := os.ReadFile("/proc/sys/kernel/overflowgid"); err == nil { - if tmp, err := strconv.Atoi(string(content)); err == nil { - overflowGID = tmp - } - } - }) - return overflowGID -} - -// ToHost returns the host UID and GID for the container uid, gid. -// Remapping is only performed if the ids aren't already the remapped root ids -// If the mapping is not possible because the target ID is not mapped into -// the namespace, then the overflow ID is used. -func (i *IDMappings) ToHostOverflow(pair IDPair) (IDPair, error) { - var err error - target := i.RootPair() - - if pair.UID != target.UID { - target.UID, err = RawToHost(pair.UID, i.uids) - if err != nil { - target.UID = getOverflowUID() - logrus.Debugf("Failed to map UID %v to the target mapping, using the overflow ID %v", pair.UID, target.UID) - } - } - - if pair.GID != target.GID { - target.GID, err = RawToHost(pair.GID, i.gids) - if err != nil { - target.GID = getOverflowGID() - logrus.Debugf("Failed to map GID %v to the target mapping, using the overflow ID %v", pair.GID, target.GID) - } - } - return target, nil -} - -// ToContainer returns the container UID and GID for the host uid and gid -func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) { - uid, err := RawToContainer(pair.UID, i.uids) - if err != nil { - return -1, -1, err - } - gid, err := RawToContainer(pair.GID, i.gids) - return uid, gid, err -} - -// Empty returns true if there are no id mappings -func (i *IDMappings) Empty() bool { - return len(i.uids) == 0 && len(i.gids) == 0 -} - -// UIDs return the UID mapping -// TODO: remove this once everything has been refactored to use pairs -func (i *IDMappings) UIDs() []IDMap { - return i.uids -} - -// GIDs return the UID mapping -// TODO: remove this once everything has been refactored to use pairs -func (i *IDMappings) GIDs() []IDMap { - return i.gids -} - -func createIDMap(subidRanges ranges) []IDMap { - idMap := []IDMap{} - - // sort the ranges by lowest ID first - sort.Sort(subidRanges) - containerID := 0 - for _, idrange := range subidRanges { - idMap = append(idMap, IDMap{ - ContainerID: containerID, - HostID: idrange.Start, - Size: idrange.Length, - }) - containerID = containerID + idrange.Length - } - return idMap -} - -// parseSubidFile will read the appropriate file (/etc/subuid or 
/etc/subgid) -// and return all found ranges for a specified username. If the special value -// "ALL" is supplied for username, then all ranges in the file will be returned -func parseSubidFile(path, username string) (ranges, error) { - var ( - rangeList ranges - uidstr string - ) - if u, err := user.Lookup(username); err == nil { - uidstr = u.Uid - } - - subidFile, err := os.Open(path) - if err != nil { - return rangeList, err - } - defer subidFile.Close() - - s := bufio.NewScanner(subidFile) - for s.Scan() { - if err := s.Err(); err != nil { - return rangeList, err - } - - text := strings.TrimSpace(s.Text()) - if text == "" || strings.HasPrefix(text, "#") { - continue - } - parts := strings.Split(text, ":") - if len(parts) != 3 { - return rangeList, fmt.Errorf("cannot parse subuid/gid information: Format not correct for %s file", path) - } - if parts[0] == username || username == "ALL" || (parts[0] == uidstr && parts[0] != "") { - startid, err := strconv.Atoi(parts[1]) - if err != nil { - return rangeList, fmt.Errorf("string to int conversion failed during subuid/gid parsing of %s: %w", path, err) - } - length, err := strconv.Atoi(parts[2]) - if err != nil { - return rangeList, fmt.Errorf("string to int conversion failed during subuid/gid parsing of %s: %w", path, err) - } - rangeList = append(rangeList, subIDRange{startid, length}) - } - } - return rangeList, nil -} - -func checkChownErr(err error, name string, uid, gid int) error { - var e *os.PathError - if errors.As(err, &e) && e.Err == syscall.EINVAL { - return fmt.Errorf(`potentially insufficient UIDs or GIDs available in user namespace (requested %d:%d for %s): Check /etc/subuid and /etc/subgid if configured locally and run "podman system migrate": %w`, uid, gid, name, err) - } - return err -} - -// Stat contains file states that can be overridden with ContainersOverrideXattr. -type Stat struct { - IDs IDPair - Mode os.FileMode - Major int - Minor int -} - -// FormatContainersOverrideXattr will format the given uid, gid, and mode into a string -// that can be used as the value for the ContainersOverrideXattr xattr. -func FormatContainersOverrideXattr(uid, gid, mode int) string { - return FormatContainersOverrideXattrDevice(uid, gid, fs.FileMode(mode), 0, 0) -} - -// FormatContainersOverrideXattrDevice will format the given uid, gid, and mode into a string -// that can be used as the value for the ContainersOverrideXattr xattr. For devices, it also -// needs the major and minor numbers. -func FormatContainersOverrideXattrDevice(uid, gid int, mode fs.FileMode, major, minor int) string { - typ := "" - switch mode & os.ModeType { - case os.ModeDir: - typ = "dir" - case os.ModeSymlink: - typ = "symlink" - case os.ModeNamedPipe: - typ = "pipe" - case os.ModeSocket: - typ = "socket" - case os.ModeDevice: - typ = fmt.Sprintf("block-%d-%d", major, minor) - case os.ModeDevice | os.ModeCharDevice: - typ = fmt.Sprintf("char-%d-%d", major, minor) - default: - typ = "file" - } - unixMode := mode & os.ModePerm - if mode&os.ModeSetuid != 0 { - unixMode |= 0o4000 - } - if mode&os.ModeSetgid != 0 { - unixMode |= 0o2000 - } - if mode&os.ModeSticky != 0 { - unixMode |= 0o1000 - } - return fmt.Sprintf("%d:%d:%04o:%s", uid, gid, unixMode, typ) -} - -// GetContainersOverrideXattr will get and decode ContainersOverrideXattr. 
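Note: the override xattr value built above is a colon-separated "uid:gid:mode:type" string; the mode field folds the setuid/setgid/sticky bits into the octal permissions, and device types append major/minor numbers. A small sketch that re-derives a few encodings (encodeOverride is local to the example, not the package API, and omits the pipe/socket cases for brevity):

package main

import (
	"fmt"
	"io/fs"
	"os"
)

// encodeOverride re-derives the "uid:gid:mode:type" encoding described above.
func encodeOverride(uid, gid int, mode fs.FileMode, major, minor int) string {
	typ := "file"
	switch mode & os.ModeType {
	case os.ModeDir:
		typ = "dir"
	case os.ModeSymlink:
		typ = "symlink"
	case os.ModeDevice:
		typ = fmt.Sprintf("block-%d-%d", major, minor)
	case os.ModeDevice | os.ModeCharDevice:
		typ = fmt.Sprintf("char-%d-%d", major, minor)
	}
	perm := mode & os.ModePerm
	if mode&os.ModeSetuid != 0 {
		perm |= 0o4000
	}
	if mode&os.ModeSetgid != 0 {
		perm |= 0o2000
	}
	if mode&os.ModeSticky != 0 {
		perm |= 0o1000
	}
	return fmt.Sprintf("%d:%d:%04o:%s", uid, gid, perm, typ)
}

func main() {
	fmt.Println(encodeOverride(0, 0, 0o755, 0, 0))                  // 0:0:0755:file
	fmt.Println(encodeOverride(1000, 1000, os.ModeDir|0o700, 0, 0)) // 1000:1000:0700:dir
	fmt.Println(encodeOverride(0, 0, os.ModeDevice|0o600, 1, 3))    // 0:0:0600:block-1-3
}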
-func GetContainersOverrideXattr(path string) (Stat, error) { - xstat, err := system.Lgetxattr(path, ContainersOverrideXattr) - if err != nil { - return Stat{}, err - } - return parseOverrideXattr(xstat) // This will fail if (xstat, err) == (nil, nil), i.e. the xattr does not exist. -} - -func parseOverrideXattr(xstat []byte) (Stat, error) { - var stat Stat - attrs := strings.Split(string(xstat), ":") - if len(attrs) < 3 { - return stat, fmt.Errorf("the number of parts in %s is less than 3", - ContainersOverrideXattr) - } - - value, err := strconv.ParseUint(attrs[0], 10, 32) - if err != nil { - return stat, fmt.Errorf("failed to parse UID: %w", err) - } - stat.IDs.UID = int(value) - - value, err = strconv.ParseUint(attrs[1], 10, 32) - if err != nil { - return stat, fmt.Errorf("failed to parse GID: %w", err) - } - stat.IDs.GID = int(value) - - value, err = strconv.ParseUint(attrs[2], 8, 32) - if err != nil { - return stat, fmt.Errorf("failed to parse mode: %w", err) - } - stat.Mode = os.FileMode(value) & os.ModePerm - if value&0o1000 != 0 { - stat.Mode |= os.ModeSticky - } - if value&0o2000 != 0 { - stat.Mode |= os.ModeSetgid - } - if value&0o4000 != 0 { - stat.Mode |= os.ModeSetuid - } - - if len(attrs) > 3 { - typ := attrs[3] - if strings.HasPrefix(typ, "file") { - } else if strings.HasPrefix(typ, "dir") { - stat.Mode |= os.ModeDir - } else if strings.HasPrefix(typ, "symlink") { - stat.Mode |= os.ModeSymlink - } else if strings.HasPrefix(typ, "pipe") { - stat.Mode |= os.ModeNamedPipe - } else if strings.HasPrefix(typ, "socket") { - stat.Mode |= os.ModeSocket - } else if strings.HasPrefix(typ, "block") { - stat.Mode |= os.ModeDevice - stat.Major, stat.Minor, err = parseDevice(typ) - if err != nil { - return stat, err - } - } else if strings.HasPrefix(typ, "char") { - stat.Mode |= os.ModeDevice | os.ModeCharDevice - stat.Major, stat.Minor, err = parseDevice(typ) - if err != nil { - return stat, err - } - } else { - return stat, fmt.Errorf("invalid file type %s", typ) - } - } - return stat, nil -} - -func parseDevice(typ string) (int, int, error) { - parts := strings.Split(typ, "-") - // If there are more than 3 parts, just ignore them to be forward compatible - if len(parts) < 3 { - return 0, 0, fmt.Errorf("invalid device type %s", typ) - } - if parts[0] != "block" && parts[0] != "char" { - return 0, 0, fmt.Errorf("invalid device type %s", typ) - } - major, err := strconv.Atoi(parts[1]) - if err != nil { - return 0, 0, fmt.Errorf("failed to parse major number: %w", err) - } - minor, err := strconv.Atoi(parts[2]) - if err != nil { - return 0, 0, fmt.Errorf("failed to parse minor number: %w", err) - } - return major, minor, nil -} - -// SetContainersOverrideXattr will encode and set ContainersOverrideXattr. -func SetContainersOverrideXattr(path string, stat Stat) error { - value := FormatContainersOverrideXattrDevice(stat.IDs.UID, stat.IDs.GID, stat.Mode, stat.Major, stat.Minor) - return system.Lsetxattr(path, ContainersOverrideXattr, []byte(value), 0) -} - -func SafeChown(name string, uid, gid int) error { - if runtime.GOOS == "darwin" { - stat := Stat{ - Mode: os.FileMode(0o0700), - } - xstat, err := system.Lgetxattr(name, ContainersOverrideXattr) - if err == nil && xstat != nil { - stat, err = parseOverrideXattr(xstat) - if err != nil { - return err - } - } else { - st, err := os.Stat(name) // Ideally we would share this with system.Stat below, but then we would need to convert Mode. 
- if err != nil { - return err - } - stat.Mode = st.Mode() - } - stat.IDs = IDPair{UID: uid, GID: gid} - if err = SetContainersOverrideXattr(name, stat); err != nil { - return err - } - uid = os.Getuid() - gid = os.Getgid() - } - if stat, statErr := system.Stat(name); statErr == nil { - if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) { - return nil - } - } - return checkChownErr(os.Chown(name, uid, gid), name, uid, gid) -} - -func SafeLchown(name string, uid, gid int) error { - if runtime.GOOS == "darwin" { - stat := Stat{ - Mode: os.FileMode(0o0700), - } - xstat, err := system.Lgetxattr(name, ContainersOverrideXattr) - if err == nil && xstat != nil { - stat, err = parseOverrideXattr(xstat) - if err != nil { - return err - } - } else { - st, err := os.Lstat(name) // Ideally we would share this with system.Stat below, but then we would need to convert Mode. - if err != nil { - return err - } - stat.Mode = st.Mode() - } - stat.IDs = IDPair{UID: uid, GID: gid} - if err = SetContainersOverrideXattr(name, stat); err != nil { - return err - } - uid = os.Getuid() - gid = os.Getgid() - } - if stat, statErr := system.Lstat(name); statErr == nil { - if stat.UID() == uint32(uid) && stat.GID() == uint32(gid) { - return nil - } - } - return checkChownErr(os.Lchown(name, uid, gid), name, uid, gid) -} - -type sortByHostID []IDMap - -func (e sortByHostID) Len() int { return len(e) } -func (e sortByHostID) Swap(i, j int) { e[i], e[j] = e[j], e[i] } -func (e sortByHostID) Less(i, j int) bool { return e[i].HostID < e[j].HostID } - -type sortByContainerID []IDMap - -func (e sortByContainerID) Len() int { return len(e) } -func (e sortByContainerID) Swap(i, j int) { e[i], e[j] = e[j], e[i] } -func (e sortByContainerID) Less(i, j int) bool { return e[i].ContainerID < e[j].ContainerID } - -// IsContiguous checks if the specified mapping is contiguous and doesn't -// have any hole. 
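Note: on darwin, SafeChown and SafeLchown above do not chown to the requested IDs at all; they record the intended ownership in the override xattr and then chown to the invoking user, since arbitrary chown is generally unavailable there. The xattr write in isolation, as a sketch assuming golang.org/x/sys/unix and a hypothetical path:

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

// Same attribute name as the ContainersOverrideXattr constant above.
const overrideXattr = "user.containers.override_stat"

func main() {
	// Record "uid 0, gid 0, mode 0755, regular file" without an actual chown.
	val := fmt.Sprintf("%d:%d:%04o:%s", 0, 0, 0o755, "file")
	// "layer/file" is a placeholder path for the example.
	if err := unix.Lsetxattr("layer/file", overrideXattr, []byte(val), 0); err != nil {
		log.Fatal(err)
	}
}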
-func IsContiguous(mappings []IDMap) bool { - if len(mappings) < 2 { - return true - } - - var mh sortByHostID = mappings[:] - sort.Sort(mh) - for i := 1; i < len(mh); i++ { - if mh[i].HostID != mh[i-1].HostID+mh[i-1].Size { - return false - } - } - - var mc sortByContainerID = mappings[:] - sort.Sort(mc) - for i := 1; i < len(mc); i++ { - if mc[i].ContainerID != mc[i-1].ContainerID+mc[i-1].Size { - return false - } - } - return true -} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go deleted file mode 100644 index 9a17f57014..0000000000 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_supported.go +++ /dev/null @@ -1,91 +0,0 @@ -//go:build linux && cgo && libsubid - -package idtools - -import ( - "errors" - "os/user" - "sync" - "unsafe" ) - -/* -#cgo LDFLAGS: -l subid -#include <shadow/subid.h> -#include <stdlib.h> -#include <stdio.h> - -struct subid_range get_range(struct subid_range *ranges, int i) -{ - return ranges[i]; -} - -#if !defined(SUBID_ABI_MAJOR) || (SUBID_ABI_MAJOR < 4) -# define subid_init libsubid_init -# define subid_get_uid_ranges get_subuid_ranges -# define subid_get_gid_ranges get_subgid_ranges -#endif - -*/ -import "C" - -var onceInit sync.Once - -func readSubid(username string, isUser bool) (ranges, error) { - var ret ranges - uidstr := "" - - if username == "ALL" { - return nil, errors.New("username ALL not supported") - } - - if u, err := user.Lookup(username); err == nil { - uidstr = u.Uid - } - - onceInit.Do(func() { - C.subid_init(C.CString("storage"), C.stderr) - }) - - cUsername := C.CString(username) - defer C.free(unsafe.Pointer(cUsername)) - - cuidstr := C.CString(uidstr) - defer C.free(unsafe.Pointer(cuidstr)) - - var nRanges C.int - var cRanges *C.struct_subid_range - if isUser { - nRanges = C.subid_get_uid_ranges(cUsername, &cRanges) - if nRanges <= 0 { - nRanges = C.subid_get_uid_ranges(cuidstr, &cRanges) - } - } else { - nRanges = C.subid_get_gid_ranges(cUsername, &cRanges) - if nRanges <= 0 { - nRanges = C.subid_get_gid_ranges(cuidstr, &cRanges) - } - } - if nRanges < 0 { - return nil, errors.New("cannot read subids") - } - defer C.free(unsafe.Pointer(cRanges)) - - for i := 0; i < int(nRanges); i++ { - r := C.get_range(cRanges, C.int(i)) - newRange := subIDRange{ - Start: int(r.start), - Length: int(r.count), - } - ret = append(ret, newRange) - } - return ret, nil -} - -func readSubuid(username string) (ranges, error) { - return readSubid(username, true) -} - -func readSubgid(username string) (ranges, error) { - return readSubid(username, false) -} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go deleted file mode 100644 index 1da7dadbfa..0000000000 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_unix.go +++ /dev/null @@ -1,214 +0,0 @@ -//go:build !windows - -package idtools - -import ( - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - "syscall" - - "github.com/containers/storage/pkg/fileutils" - "github.com/containers/storage/pkg/system" - "github.com/moby/sys/user" ) - -var ( - entOnce sync.Once - getentCmd string ) - -func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { - // make an array containing the original path asked for, plus (for mkAll == true) - // all path components leading up to the complete path that don't exist before we MkdirAll - // so that we can chown all of them
properly at the end. If chownExisting is false, we won't - // chown the full directory path if it exists - var paths []string - st, err := os.Stat(path) - if err != nil && os.IsNotExist(err) { - paths = []string{path} - } else if err == nil { - if !st.IsDir() { - return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} - } - if chownExisting { - // short-circuit--we were called with an existing directory and chown was requested - return SafeChown(path, ownerUID, ownerGID) - } - // nothing to do; directory exists and chown was NOT requested - return nil - } - - if mkAll { - // walk back to "/" looking for directories which do not exist - // and add them to the paths array for chown after creation - dirPath := path - if !filepath.IsAbs(dirPath) { - return fmt.Errorf("path: %s should be absolute", dirPath) - } - for { - dirPath = filepath.Dir(dirPath) - if dirPath == "/" { - break - } - if err := fileutils.Exists(dirPath); err != nil && os.IsNotExist(err) { - paths = append(paths, dirPath) - } - } - if err := os.MkdirAll(path, mode); err != nil { - return err - } - } else { - if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { - return err - } - } - // even if it existed, we will chown the requested path + any subpaths that - // didn't exist when we called MkdirAll - for _, pathComponent := range paths { - if err := SafeChown(pathComponent, ownerUID, ownerGID); err != nil { - return err - } - } - return nil -} - -// CanAccess takes a valid (existing) directory and a uid, gid pair and determines -// if that uid, gid pair has access (execute bit) to the directory -func CanAccess(path string, pair IDPair) bool { - statInfo, err := system.Stat(path) - if err != nil { - return false - } - fileMode := os.FileMode(statInfo.Mode()) - permBits := fileMode.Perm() - return accessible(statInfo.UID() == uint32(pair.UID), - statInfo.GID() == uint32(pair.GID), permBits) -} - -func accessible(isOwner, isGroup bool, perms os.FileMode) bool { - if isOwner && (perms&0o100 == 0o100) { - return true - } - if isGroup && (perms&0o010 == 0o010) { - return true - } - if perms&0o001 == 0o001 { - return true - } - return false -} - -// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username, -// followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupUser(username string) (user.User, error) { - // first try a local system files lookup using existing capabilities - usr, err := user.LookupUser(username) - if err == nil { - return usr, nil - } - // local files lookup failed; attempt to call `getent` to query configured passwd dbs - usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username)) - if err != nil { - return user.User{}, err - } - return usr, nil -} - -// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid, -// followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupUID(uid int) (user.User, error) { - // first try a local system files lookup using existing capabilities - usr, err := user.LookupUid(uid) - if err == nil { - return usr, nil - } - // local files lookup failed; attempt to call `getent` to query configured passwd dbs - return getentUser(fmt.Sprintf("%s %d", "passwd", uid)) -} - -func getentUser(args string) (user.User, error) { - reader, err := callGetent(args) - if err != nil { - return user.User{}, err - } - users, err := user.ParsePasswd(reader) - if err != nil { - return user.User{}, err - } - if 
len(users) == 0 { - return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1]) - } - return users[0], nil -} - -// LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name, -// followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupGroup(groupname string) (user.Group, error) { - // first try a local system files lookup using existing capabilities - group, err := user.LookupGroup(groupname) - if err == nil { - return group, nil - } - // local files lookup failed; attempt to call `getent` to query configured group dbs - return getentGroup(fmt.Sprintf("%s %s", "group", groupname)) -} - -// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID, -// followed by a call to `getent` for supporting host configured non-files passwd and group dbs -func LookupGID(gid int) (user.Group, error) { - // first try a local system files lookup using existing capabilities - group, err := user.LookupGid(gid) - if err == nil { - return group, nil - } - // local files lookup failed; attempt to call `getent` to query configured group dbs - return getentGroup(fmt.Sprintf("%s %d", "group", gid)) -} - -func getentGroup(args string) (user.Group, error) { - reader, err := callGetent(args) - if err != nil { - return user.Group{}, err - } - groups, err := user.ParseGroup(reader) - if err != nil { - return user.Group{}, err - } - if len(groups) == 0 { - return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1]) - } - return groups[0], nil -} - -func callGetent(args string) (io.Reader, error) { - entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") }) - // if no `getent` command on host, can't do anything else - if getentCmd == "" { - return nil, fmt.Errorf("") - } - out, err := execCmd(getentCmd, args) - if err != nil { - exitCode, errC := system.GetExitCode(err) - if errC != nil { - return nil, err - } - switch exitCode { - case 1: - return nil, fmt.Errorf("getent reported invalid parameters/database unknown") - case 2: - terms := strings.Split(args, " ") - return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0]) - case 3: - return nil, fmt.Errorf("getent database doesn't support enumeration") - default: - return nil, err - } - - } - return bytes.NewReader(out), nil -} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go deleted file mode 100644 index e6f5c1ba68..0000000000 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_unsupported.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build !linux || !libsubid || !cgo - -package idtools - -func readSubuid(username string) (ranges, error) { - return parseSubidFile(subuidFileName, username) -} - -func readSubgid(username string) (ranges, error) { - return parseSubidFile(subgidFileName, username) -} diff --git a/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go b/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go deleted file mode 100644 index ec6a3a0469..0000000000 --- a/vendor/github.com/containers/storage/pkg/idtools/idtools_windows.go +++ /dev/null @@ -1,23 +0,0 @@ -//go:build windows - -package idtools - -import ( - "os" -) - -// Platforms such as Windows do not support the UID/GID concept. So make this -// just a wrapper around system.MkdirAll. 
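Note: the Lookup* helpers above consult the local passwd/group files first and only then shell out to getent(1), which resolves through whatever NSS sources the host is configured with (LDAP, SSSD, NIS). The fallback in miniature, assuming a Unix host with getent on PATH:

package main

import (
	"fmt"
	"log"
	"os/exec"
	"strings"
)

// getentPasswd resolves a user via `getent passwd <name>` and returns the
// colon-separated passwd fields (name:passwd:uid:gid:gecos:home:shell).
func getentPasswd(name string) ([]string, error) {
	out, err := exec.Command("getent", "passwd", name).Output()
	if err != nil {
		return nil, fmt.Errorf("getent passwd %s: %w", name, err)
	}
	fields := strings.Split(strings.TrimSpace(string(out)), ":")
	if len(fields) < 7 {
		return nil, fmt.Errorf("unexpected getent output: %q", out)
	}
	return fields, nil
}

func main() {
	fields, err := getentPasswd("root")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("user %s has uid %s and home %s\n", fields[0], fields[2], fields[5])
}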
-func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { - if err := os.MkdirAll(path, mode); err != nil { - return err - } - return nil -} - -// CanAccess takes a valid (existing) directory and a uid, gid pair and determines -// if that uid, gid pair has access (execute bit) to the directory -// Windows does not require/support this function, so always return true -func CanAccess(path string, pair IDPair) bool { - return true -} diff --git a/vendor/github.com/containers/storage/pkg/idtools/parser.go b/vendor/github.com/containers/storage/pkg/idtools/parser.go deleted file mode 100644 index 042d0ea957..0000000000 --- a/vendor/github.com/containers/storage/pkg/idtools/parser.go +++ /dev/null @@ -1,59 +0,0 @@ -package idtools - -import ( - "fmt" - "math" - "math/bits" - "strconv" - "strings" -) - -func parseTriple(spec []string) (container, host, size uint32, err error) { - cid, err := strconv.ParseUint(spec[0], 10, 32) - if err != nil { - return 0, 0, 0, fmt.Errorf("parsing id map value %q: %w", spec[0], err) - } - hid, err := strconv.ParseUint(spec[1], 10, 32) - if err != nil { - return 0, 0, 0, fmt.Errorf("parsing id map value %q: %w", spec[1], err) - } - sz, err := strconv.ParseUint(spec[2], 10, 32) - if err != nil { - return 0, 0, 0, fmt.Errorf("parsing id map value %q: %w", spec[2], err) - } - return uint32(cid), uint32(hid), uint32(sz), nil -} - -// ParseIDMap parses idmap triples from string. -func ParseIDMap(mapSpec []string, mapSetting string) (idmap []IDMap, err error) { - stdErr := fmt.Errorf("initializing ID mappings: %s setting is malformed expected [\"uint32:uint32:uint32\"]: %q", mapSetting, mapSpec) - for _, idMapSpec := range mapSpec { - if idMapSpec == "" { - continue - } - idSpec := strings.Split(idMapSpec, ":") - if len(idSpec)%3 != 0 { - return nil, stdErr - } - for i := range idSpec { - if i%3 != 0 { - continue - } - cid, hid, size, err := parseTriple(idSpec[i : i+3]) - if err != nil { - return nil, stdErr - } - // Avoid possible integer overflow on 32bit builds - if bits.UintSize == 32 && (cid > math.MaxInt32 || hid > math.MaxInt32 || size > math.MaxInt32) { - return nil, stdErr - } - mapping := IDMap{ - ContainerID: int(cid), - HostID: int(hid), - Size: int(size), - } - idmap = append(idmap, mapping) - } - } - return idmap, nil -} diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go deleted file mode 100644 index ac27718de2..0000000000 --- a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_linux.go +++ /dev/null @@ -1,164 +0,0 @@ -package idtools - -import ( - "fmt" - "sort" - "strconv" - "strings" - "sync" - - "github.com/containers/storage/pkg/regexp" -) - -// add a user and/or group to Linux /etc/passwd, /etc/group using standard -// Linux distribution commands: -// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group -// useradd -r -s /bin/false - -var ( - once sync.Once - userCommand string - - cmdTemplates = map[string]string{ - "adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s", - "useradd": "-r -s /bin/false %s", - "usermod": "-%s %d-%d %s", - } - - idOutRegexp = regexp.Delayed(`uid=([0-9]+).*gid=([0-9]+)`) - // default length for a UID/GID subordinate range - defaultRangeLen = 65536 - defaultRangeStart = 100000 - userMod = "usermod" -) - -// AddNamespaceRangesUser takes a username and uses the 
standard system -// utility to create a system user/group pair used to hold the -// /etc/sub{uid,gid} ranges which will be used for user namespace -// mapping ranges in containers. -func AddNamespaceRangesUser(name string) (int, int, error) { - if err := addUser(name); err != nil { - return -1, -1, fmt.Errorf("adding user %q: %w", name, err) - } - - // Query the system for the created uid and gid pair - out, err := execCmd("id", name) - if err != nil { - return -1, -1, fmt.Errorf("trying to find uid/gid for new user %q: %w", name, err) - } - matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out))) - if len(matches) != 3 { - return -1, -1, fmt.Errorf("can't find uid, gid from `id` output: %q", string(out)) - } - uid, err := strconv.Atoi(matches[1]) - if err != nil { - return -1, -1, fmt.Errorf("can't convert found uid (%s) to int: %w", matches[1], err) - } - gid, err := strconv.Atoi(matches[2]) - if err != nil { - return -1, -1, fmt.Errorf("can't convert found gid (%s) to int: %w", matches[2], err) - } - - // Now we need to create the subuid/subgid ranges for our new user/group (system users - // do not get auto-created ranges in subuid/subgid) - - if err := createSubordinateRanges(name); err != nil { - return -1, -1, fmt.Errorf("couldn't create subordinate ID ranges: %w", err) - } - return uid, gid, nil -} - -func addUser(userName string) error { - once.Do(func() { - // set up which commands are used for adding users/groups dependent on distro - if _, err := resolveBinary("adduser"); err == nil { - userCommand = "adduser" - } else if _, err := resolveBinary("useradd"); err == nil { - userCommand = "useradd" - } - }) - if userCommand == "" { - return fmt.Errorf("cannot add user; no useradd/adduser binary found") - } - args := fmt.Sprintf(cmdTemplates[userCommand], userName) - out, err := execCmd(userCommand, args) - if err != nil { - return fmt.Errorf("failed to add user with error: %w; output: %q", err, string(out)) - } - return nil -} - -func createSubordinateRanges(name string) error { - // first, we should verify that ranges weren't automatically created - // by the distro tooling - ranges, err := readSubuid(name) - if err != nil { - return fmt.Errorf("while looking for subuid ranges for user %q: %w", name, err) - } - if len(ranges) == 0 { - // no UID ranges; let's create one - startID, err := findNextUIDRange() - if err != nil { - return fmt.Errorf("can't find available subuid range: %w", err) - } - out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name)) - if err != nil { - return fmt.Errorf("unable to add subuid range to user: %q; output: %s, err: %w", name, out, err) - } - } - - ranges, err = readSubgid(name) - if err != nil { - return fmt.Errorf("while looking for subgid ranges for user %q: %w", name, err) - } - if len(ranges) == 0 { - // no GID ranges; let's create one - startID, err := findNextGIDRange() - if err != nil { - return fmt.Errorf("can't find available subgid range: %w", err) - } - out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name)) - if err != nil { - return fmt.Errorf("unable to add subgid range to user: %q; output: %s, err: %w", name, out, err) - } - } - return nil -} - -func findNextUIDRange() (int, error) { - ranges, err := readSubuid("ALL") - if err != nil { - return -1, fmt.Errorf("couldn't parse all ranges in /etc/subuid file: %w", err) - } - sort.Sort(ranges) - return findNextRangeStart(ranges) -} - -func findNextGIDRange() (int, 
error) { - ranges, err := readSubgid("ALL") - if err != nil { - return -1, fmt.Errorf("couldn't parse all ranges in /etc/subgid file: %w", err) - } - sort.Sort(ranges) - return findNextRangeStart(ranges) -} - -func findNextRangeStart(rangeList ranges) (int, error) { - startID := defaultRangeStart - for _, arange := range rangeList { - if wouldOverlap(arange, startID) { - startID = arange.Start + arange.Length - } - } - return startID, nil -} - -func wouldOverlap(arange subIDRange, ID int) bool { - low := ID - high := ID + defaultRangeLen - if (low >= arange.Start && low <= arange.Start+arange.Length) || - (high <= arange.Start+arange.Length && high >= arange.Start) { - return true - } - return false -} diff --git a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go b/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go deleted file mode 100644 index e37c4540c3..0000000000 --- a/vendor/github.com/containers/storage/pkg/idtools/usergroupadd_unsupported.go +++ /dev/null @@ -1,12 +0,0 @@ -//go:build !linux - -package idtools - -import "fmt" - -// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair -// and calls the appropriate helper function to add the group and then -// the user to the group in /etc/group and /etc/passwd respectively. -func AddNamespaceRangesUser(name string) (int, int, error) { - return -1, -1, fmt.Errorf("no support for adding users or groups on this OS") -} diff --git a/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go b/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go deleted file mode 100644 index f34462a23a..0000000000 --- a/vendor/github.com/containers/storage/pkg/idtools/utils_unix.go +++ /dev/null @@ -1,32 +0,0 @@ -//go:build !windows - -package idtools - -import ( - "fmt" - "os/exec" - "path/filepath" - "strings" -) - -func resolveBinary(binname string) (string, error) { - binaryPath, err := exec.LookPath(binname) - if err != nil { - return "", err - } - resolvedPath, err := filepath.EvalSymlinks(binaryPath) - if err != nil { - return "", err - } - // only return no error if the final resolved binary basename - // matches what was searched for - if filepath.Base(resolvedPath) == binname { - return resolvedPath, nil - } - return "", fmt.Errorf("binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) -} - -func execCmd(cmd, args string) ([]byte, error) { - execCmd := exec.Command(cmd, strings.Split(args, " ")...) 
- return execCmd.CombinedOutput() -} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/buffer.go b/vendor/github.com/containers/storage/pkg/ioutils/buffer.go deleted file mode 100644 index 3d737b3e19..0000000000 --- a/vendor/github.com/containers/storage/pkg/ioutils/buffer.go +++ /dev/null @@ -1,51 +0,0 @@ -package ioutils - -import ( - "errors" - "io" -) - -var errBufferFull = errors.New("buffer is full") - -type fixedBuffer struct { - buf []byte - pos int - lastRead int -} - -func (b *fixedBuffer) Write(p []byte) (int, error) { - n := copy(b.buf[b.pos:cap(b.buf)], p) - b.pos += n - - if n < len(p) { - if b.pos == cap(b.buf) { - return n, errBufferFull - } - return n, io.ErrShortWrite - } - return n, nil -} - -func (b *fixedBuffer) Read(p []byte) (int, error) { - n := copy(p, b.buf[b.lastRead:b.pos]) - b.lastRead += n - return n, nil -} - -func (b *fixedBuffer) Len() int { - return b.pos - b.lastRead -} - -func (b *fixedBuffer) Cap() int { - return cap(b.buf) -} - -func (b *fixedBuffer) Reset() { - b.pos = 0 - b.lastRead = 0 - b.buf = b.buf[:0] -} - -func (b *fixedBuffer) String() string { - return string(b.buf[b.lastRead:b.pos]) -} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go b/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go deleted file mode 100644 index cf60580359..0000000000 --- a/vendor/github.com/containers/storage/pkg/ioutils/bytespipe.go +++ /dev/null @@ -1,183 +0,0 @@ -package ioutils - -import ( - "errors" - "io" - "sync" -) - -// maxCap is the highest capacity to use in byte slices that buffer data. -const maxCap = 1e6 - -// minCap is the lowest capacity to use in byte slices that buffer data -const minCap = 64 - -// blockThreshold is the minimum number of bytes in the buffer which will cause -// a write to BytesPipe to block when allocating a new slice. -const blockThreshold = 1e6 - -var ( - // ErrClosed is returned when Write is called on a closed BytesPipe. - ErrClosed = errors.New("write to closed BytesPipe") - - bufPools = make(map[int]*sync.Pool) - bufPoolsLock sync.Mutex -) - -// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue). -// All written data may be read at most once. Also, BytesPipe allocates -// and releases new byte slices to adjust to current needs, so the buffer -// won't be overgrown after peak loads. -type BytesPipe struct { - mu sync.Mutex - wait *sync.Cond - buf []*fixedBuffer - bufLen int - closeErr error // error to return from next Read. set to nil if not closed. -} - -// NewBytesPipe creates new BytesPipe, initialized by specified slice. -// If buf is nil, then it will be initialized with slice which cap is 64. -// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf). -func NewBytesPipe() *BytesPipe { - bp := &BytesPipe{} - bp.buf = append(bp.buf, getBuffer(minCap)) - bp.wait = sync.NewCond(&bp.mu) - return bp -} - -// Write writes p to BytesPipe. -// It can allocate new []byte slices in a process of writing. 
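Note: the BytesPipe implementation that follows queues fixed-capacity buffers and recycles them through per-capacity sync.Pools (getBuffer/returnBuffer further down), doubling the next buffer's capacity up to maxCap so peak loads do not permanently inflate memory. The pooling idea in isolation, as a sketch:

package main

import (
	"fmt"
	"sync"
)

// fixedBuffer stands in for the pooled buffer type above.
type fixedBuffer struct{ buf []byte }

// pools are keyed by capacity, as in the deleted getBuffer/returnBuffer pair.
var (
	pools   = map[int]*sync.Pool{}
	poolsMu sync.Mutex
)

func getBuf(size int) *fixedBuffer {
	poolsMu.Lock()
	p, ok := pools[size]
	if !ok {
		p = &sync.Pool{New: func() any { return &fixedBuffer{buf: make([]byte, 0, size)} }}
		pools[size] = p
	}
	poolsMu.Unlock()
	return p.Get().(*fixedBuffer)
}

func putBuf(b *fixedBuffer) {
	c := cap(b.buf)
	b.buf = b.buf[:0] // reset length, keep capacity
	poolsMu.Lock()
	p := pools[c]
	poolsMu.Unlock()
	if p != nil {
		p.Put(b)
	}
}

func main() {
	b := getBuf(64)
	b.buf = append(b.buf, "hello"...)
	fmt.Println(string(b.buf), cap(b.buf)) // hello 64
	putBuf(b)                              // returned to the 64-byte pool for reuse
}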
-func (bp *BytesPipe) Write(p []byte) (int, error) { - bp.mu.Lock() - - written := 0 -loop0: - for { - if bp.closeErr != nil { - bp.mu.Unlock() - return written, ErrClosed - } - - if len(bp.buf) == 0 { - bp.buf = append(bp.buf, getBuffer(64)) - } - // get the last buffer - b := bp.buf[len(bp.buf)-1] - - n, err := b.Write(p) - written += n - bp.bufLen += n - - // errBufferFull is an error we expect to get if the buffer is full - if err != nil && err != errBufferFull { - bp.wait.Broadcast() - bp.mu.Unlock() - return written, err - } - - // if there was enough room to write all then break - if len(p) == n { - break - } - - // more data: write to the next slice - p = p[n:] - - // make sure the buffer doesn't grow too big from this write - for bp.bufLen >= blockThreshold { - bp.wait.Wait() - if bp.closeErr != nil { - continue loop0 - } - } - - // add new byte slice to the buffers slice and continue writing - nextCap := min(b.Cap()*2, maxCap) - bp.buf = append(bp.buf, getBuffer(nextCap)) - } - bp.wait.Broadcast() - bp.mu.Unlock() - return written, nil -} - -// CloseWithError causes further reads from a BytesPipe to return immediately. -func (bp *BytesPipe) CloseWithError(err error) error { - bp.mu.Lock() - if err != nil { - bp.closeErr = err - } else { - bp.closeErr = io.EOF - } - bp.wait.Broadcast() - bp.mu.Unlock() - return nil -} - -// Close causes further reads from a BytesPipe to return immediately. -func (bp *BytesPipe) Close() error { - return bp.CloseWithError(nil) -} - -// Read reads bytes from BytesPipe. -// Data could be read only once. -func (bp *BytesPipe) Read(p []byte) (n int, err error) { - bp.mu.Lock() - if bp.bufLen == 0 { - if bp.closeErr != nil { - bp.mu.Unlock() - return 0, bp.closeErr - } - bp.wait.Wait() - if bp.bufLen == 0 && bp.closeErr != nil { - err := bp.closeErr - bp.mu.Unlock() - return 0, err - } - } - - for bp.bufLen > 0 { - b := bp.buf[0] - read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error - n += read - bp.bufLen -= read - - if b.Len() == 0 { - // it's empty so return it to the pool and move to the next one - returnBuffer(b) - bp.buf[0] = nil - bp.buf = bp.buf[1:] - } - - if len(p) == read { - break - } - - p = p[read:] - } - - bp.wait.Broadcast() - bp.mu.Unlock() - return -} - -func returnBuffer(b *fixedBuffer) { - b.Reset() - bufPoolsLock.Lock() - pool := bufPools[b.Cap()] - bufPoolsLock.Unlock() - if pool != nil { - pool.Put(b) - } -} - -func getBuffer(size int) *fixedBuffer { - bufPoolsLock.Lock() - pool, ok := bufPools[size] - if !ok { - pool = &sync.Pool{New: func() any { return &fixedBuffer{buf: make([]byte, 0, size)} }} - bufPools[size] = pool - } - bufPoolsLock.Unlock() - return pool.Get().(*fixedBuffer) -} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go b/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go deleted file mode 100644 index fd6addd73b..0000000000 --- a/vendor/github.com/containers/storage/pkg/ioutils/fswriters.go +++ /dev/null @@ -1,284 +0,0 @@ -package ioutils - -import ( - "io" - "os" - "path/filepath" - "time" -) - -// AtomicFileWriterOptions specifies options for creating the atomic file writer. -type AtomicFileWriterOptions struct { - // NoSync specifies whether the sync call must be skipped for the file. - // If NoSync is not specified, the file is synced to the - // storage after it has been written and before it is moved to - // the specified path. - NoSync bool - // On successful return from Close() this is set to the mtime of the - // newly written file. 
- ModTime time.Time - // Specifies whether Commit() must be explicitly called to write state - // to the destination. This allows an application to preserve the original - // file when an error occurs during processing (and not just during write) - // The default is false, which will auto-commit on Close - ExplicitCommit bool -} - -type CommittableWriter interface { - io.WriteCloser - - // Commit closes the temporary file associated with this writer, and - // provided no errors (during commit or previously during write operations), - // will publish the completed file under the intended destination. - Commit() error -} - -var defaultWriterOptions = AtomicFileWriterOptions{} - -// SetDefaultOptions overrides the default options used when creating an -// atomic file writer. -func SetDefaultOptions(opts AtomicFileWriterOptions) { - defaultWriterOptions = opts -} - -// NewAtomicFileWriterWithOpts returns a CommittableWriter so that writing to it -// writes to a temporary file, which can later be committed to a destination path, -// either by Closing in the case of auto-commit, or manually calling commit if the -// ExplicitCommit option is enabled. Writing and closing concurrently is not -// allowed. -func NewAtomicFileWriterWithOpts(filename string, perm os.FileMode, opts *AtomicFileWriterOptions) (CommittableWriter, error) { - return newAtomicFileWriter(filename, perm, opts) -} - -// newAtomicFileWriter returns a CommittableWriter so that writing to it writes to -// a temporary file, which can later be committed to a destination path, either by -// Closing in the case of auto-commit, or manually calling commit if the -// ExplicitCommit option is enabled. Writing and closing concurrently is not allowed. -func newAtomicFileWriter(filename string, perm os.FileMode, opts *AtomicFileWriterOptions) (*atomicFileWriter, error) { - f, err := os.CreateTemp(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) - if err != nil { - return nil, err - } - if opts == nil { - opts = &defaultWriterOptions - } - abspath, err := filepath.Abs(filename) - if err != nil { - return nil, err - } - return &atomicFileWriter{ - f: f, - fn: abspath, - perm: perm, - noSync: opts.NoSync, - explicitCommit: opts.ExplicitCommit, - }, nil -} - -// NewAtomicFileWriterWithOpts returns a CommittableWriter, with auto-commit enabled. -// Writing to it writes to a temporary file and closing it atomically changes the -// temporary file to destination path. Writing and closing concurrently is not allowed. -func NewAtomicFileWriter(filename string, perm os.FileMode) (CommittableWriter, error) { - return NewAtomicFileWriterWithOpts(filename, perm, nil) -} - -// AtomicWriteFile atomically writes data to a file named by filename. 
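Note: the writer above implements the standard crash-safe sequence: create a ".tmp-"-prefixed file in the destination directory, write, sync the data (Fdatasync on Linux, per the fswriters_linux.go hunk below), then rename(2) over the target, so a concurrent reader observes either the old or the new file and never a torn write. Stripped of the options plumbing, the essentials look roughly like this (a sketch, not the package API; config.json is illustrative):

package main

import (
	"log"
	"os"
	"path/filepath"
)

// atomicWriteFile writes data to a temp file in filename's directory, syncs it,
// and renames it into place; rename on the same filesystem is atomic on POSIX.
func atomicWriteFile(filename string, data []byte, perm os.FileMode) error {
	dir := filepath.Dir(filename)
	f, err := os.CreateTemp(dir, ".tmp-"+filepath.Base(filename))
	if err != nil {
		return err
	}
	tmp := f.Name()
	defer os.Remove(tmp) // no-op after a successful rename
	if _, err := f.Write(data); err != nil {
		f.Close()
		return err
	}
	if err := f.Sync(); err != nil { // flush data before the rename publishes it
		f.Close()
		return err
	}
	if err := f.Chmod(perm); err != nil {
		f.Close()
		return err
	}
	if err := f.Close(); err != nil {
		return err
	}
	return os.Rename(tmp, filename)
}

func main() {
	if err := atomicWriteFile("config.json", []byte(`{"ok":true}`), 0o644); err != nil {
		log.Fatal(err)
	}
}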
-func AtomicWriteFileWithOpts(filename string, data []byte, perm os.FileMode, opts *AtomicFileWriterOptions) error { - f, err := newAtomicFileWriter(filename, perm, opts) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - f.writeErr = err - } - if err1 := f.Close(); err == nil { - err = err1 - } - - if opts != nil { - opts.ModTime = f.modTime - } - - return err -} - -func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { - return AtomicWriteFileWithOpts(filename, data, perm, nil) -} - -type atomicFileWriter struct { - f *os.File - fn string - writeErr error - perm os.FileMode - noSync bool - modTime time.Time - closed bool - explicitCommit bool -} - -func (w *atomicFileWriter) Write(dt []byte) (int, error) { - n, err := w.f.Write(dt) - if err != nil { - w.writeErr = err - } - return n, err -} - -func (w *atomicFileWriter) closeTempFile() error { - if w.closed { - return nil - } - - w.closed = true - return w.f.Close() -} - -func (w *atomicFileWriter) Close() error { - return w.complete(!w.explicitCommit) -} - -func (w *atomicFileWriter) Commit() error { - return w.complete(true) -} - -func (w *atomicFileWriter) complete(commit bool) (retErr error) { - if w == nil || w.closed { - return nil - } - - defer func() { - err := w.closeTempFile() - if retErr != nil || w.writeErr != nil { - os.Remove(w.f.Name()) - } - if retErr == nil { - retErr = err - } - }() - - if commit { - return w.commitState() - } - - return nil -} - -func (w *atomicFileWriter) commitState() error { - // Perform a data only sync (fdatasync()) if supported - if err := w.postDataWrittenSync(); err != nil { - return err - } - - // Capture fstat before closing the fd - info, err := w.f.Stat() - if err != nil { - return err - } - w.modTime = info.ModTime() - - if err := w.f.Chmod(w.perm); err != nil { - return err - } - - // Perform full sync on platforms that need it - if err := w.preRenameSync(); err != nil { - return err - } - - // Some platforms require closing before rename (Windows) - if err := w.closeTempFile(); err != nil { - return err - } - - if w.writeErr == nil { - return os.Rename(w.f.Name(), w.fn) - } - - return nil -} - -// AtomicWriteSet is used to atomically write a set -// of files and ensure they are visible at the same time. -// Must be committed to a new directory. -type AtomicWriteSet struct { - root string -} - -// NewAtomicWriteSet creates a new atomic write set to -// atomically create a set of files. The given directory -// is used as the base directory for storing files before -// commit. If no temporary directory is given the system -// default is used. -func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) { - td, err := os.MkdirTemp(tmpDir, "write-set-") - if err != nil { - return nil, err - } - - return &AtomicWriteSet{ - root: td, - }, nil -} - -// WriteFile writes a file to the set, guaranteeing the file -// has been synced. 
-func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error { - f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} - -type syncFileCloser struct { - *os.File -} - -func (w syncFileCloser) Close() error { - if !defaultWriterOptions.NoSync { - return w.File.Close() - } - err := dataOrFullSync(w.File) - if err1 := w.File.Close(); err == nil { - err = err1 - } - return err -} - -// FileWriter opens a file writer inside the set. The file -// should be synced and closed before calling commit. -func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) { - f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm) - if err != nil { - return nil, err - } - return syncFileCloser{f}, nil -} - -// Cancel cancels the set and removes all temporary data -// created in the set. -func (ws *AtomicWriteSet) Cancel() error { - return os.RemoveAll(ws.root) -} - -// Commit moves all created files to the target directory. The -// target directory must not exist and the parent of the target -// directory must exist. -func (ws *AtomicWriteSet) Commit(target string) error { - return os.Rename(ws.root, target) -} - -// String returns the location the set is writing to. -func (ws *AtomicWriteSet) String() string { - return ws.root -} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fswriters_linux.go b/vendor/github.com/containers/storage/pkg/ioutils/fswriters_linux.go deleted file mode 100644 index 10ed48cfd8..0000000000 --- a/vendor/github.com/containers/storage/pkg/ioutils/fswriters_linux.go +++ /dev/null @@ -1,23 +0,0 @@ -package ioutils - -import ( - "os" - - "golang.org/x/sys/unix" -) - -func dataOrFullSync(f *os.File) error { - return unix.Fdatasync(int(f.Fd())) -} - -func (w *atomicFileWriter) postDataWrittenSync() error { - if w.noSync { - return nil - } - return unix.Fdatasync(int(w.f.Fd())) -} - -func (w *atomicFileWriter) preRenameSync() error { - // On Linux data can be reliably flushed to media without metadata, so defer - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/fswriters_other.go b/vendor/github.com/containers/storage/pkg/ioutils/fswriters_other.go deleted file mode 100644 index 2ccdc31088..0000000000 --- a/vendor/github.com/containers/storage/pkg/ioutils/fswriters_other.go +++ /dev/null @@ -1,25 +0,0 @@ -//go:build !linux - -package ioutils - -import ( - "os" -) - -func dataOrFullSync(f *os.File) error { - return f.Sync() -} - -func (w *atomicFileWriter) postDataWrittenSync() error { - // many platforms (Mac, Windows) require a full sync to reliably flush to media - return nil -} - -func (w *atomicFileWriter) preRenameSync() error { - if w.noSync { - return nil - } - - // fsync() on Non-linux Unix, FlushFileBuffers (Windows), F_FULLFSYNC (Mac) - return w.f.Sync() -} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/readers.go b/vendor/github.com/containers/storage/pkg/ioutils/readers.go deleted file mode 100644 index 146e1a5ff0..0000000000 --- a/vendor/github.com/containers/storage/pkg/ioutils/readers.go +++ /dev/null @@ -1,170 +0,0 @@ -package ioutils - -import ( - "context" - "crypto/sha256" - "encoding/hex" - "io" -) - -type readCloserWrapper struct { - io.Reader - closer func() error -} - -func (r *readCloserWrapper) Close() error { - return 
r.closer() -} - -type readWriteToCloserWrapper struct { - io.Reader - io.WriterTo - closer func() error -} - -func (r *readWriteToCloserWrapper) Close() error { - return r.closer() -} - -// NewReadCloserWrapper returns a new io.ReadCloser. -func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { - if wt, ok := r.(io.WriterTo); ok { - return &readWriteToCloserWrapper{ - Reader: r, - WriterTo: wt, - closer: closer, - } - } - return &readCloserWrapper{ - Reader: r, - closer: closer, - } -} - -type readerErrWrapper struct { - reader io.Reader - closer func() -} - -func (r *readerErrWrapper) Read(p []byte) (int, error) { - n, err := r.reader.Read(p) - if err != nil { - r.closer() - } - return n, err -} - -// NewReaderErrWrapper returns a new io.Reader. -func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { - return &readerErrWrapper{ - reader: r, - closer: closer, - } -} - -// HashData returns the sha256 sum of src. -func HashData(src io.Reader) (string, error) { - h := sha256.New() - if _, err := io.Copy(h, src); err != nil { - return "", err - } - return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil -} - -// OnEOFReader wraps an io.ReadCloser and a function -// the function will run at the end of file or close the file. -type OnEOFReader struct { - Rc io.ReadCloser - Fn func() -} - -func (r *OnEOFReader) Read(p []byte) (n int, err error) { - n, err = r.Rc.Read(p) - if err == io.EOF { - r.runFunc() - } - return -} - -// Close closes the file and run the function. -func (r *OnEOFReader) Close() error { - err := r.Rc.Close() - r.runFunc() - return err -} - -func (r *OnEOFReader) runFunc() { - if fn := r.Fn; fn != nil { - fn() - r.Fn = nil - } -} - -// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read -// operations. -type cancelReadCloser struct { - cancel func() - pR *io.PipeReader // Stream to read from - pW *io.PipeWriter -} - -// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the -// context is cancelled. The returned io.ReadCloser must be closed when it is -// no longer needed. -func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { - pR, pW := io.Pipe() - - // Create a context used to signal when the pipe is closed - doneCtx, cancel := context.WithCancel(context.Background()) - - p := &cancelReadCloser{ - cancel: cancel, - pR: pR, - pW: pW, - } - - go func() { - _, err := io.Copy(pW, in) - select { - case <-ctx.Done(): - // If the context was closed, p.closeWithError - // was already called. Calling it again would - // change the error that Read returns. - default: - p.closeWithError(err) - } - in.Close() - }() - go func() { - for { - select { - case <-ctx.Done(): - p.closeWithError(ctx.Err()) - case <-doneCtx.Done(): - return - } - } - }() - - return p -} - -// Read wraps the Read method of the pipe that provides data from the wrapped -// ReadCloser. -func (p *cancelReadCloser) Read(buf []byte) (n int, err error) { - return p.pR.Read(buf) -} - -// closeWithError closes the wrapper and its underlying reader. It will -// cause future calls to Read to return err. -func (p *cancelReadCloser) closeWithError(err error) { - p.pW.CloseWithError(err) - p.cancel() -} - -// Close closes the wrapper its underlying reader. It will cause -// future calls to Read to return io.EOF. 
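Editorial note on the readers.go hunk above: `NewCancelReadCloser` bridges a blocking reader to a context by pumping it through an `io.Pipe` and closing the pipe with `ctx.Err()` on cancellation. A usage sketch against the removed API as it appears in the hunk (the function `fetchWithTimeout` and the URL are illustrative; adjust the import path to wherever the package is consumed from after the module move):

```go
package main

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"time"

	"github.com/containers/storage/pkg/ioutils"
)

// fetchWithTimeout bounds a slow download: once the context expires, Read on
// the wrapper returns ctx.Err() instead of blocking on the network forever.
func fetchWithTimeout(url string) ([]byte, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	resp, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	body := ioutils.NewCancelReadCloser(ctx, resp.Body)
	defer body.Close()

	return io.ReadAll(body)
}

func main() {
	data, err := fetchWithTimeout("https://example.com/")
	if err != nil {
		fmt.Println("read failed:", err)
		return
	}
	fmt.Println("read", len(data), "bytes")
}
```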
-func (p *cancelReadCloser) Close() error { - p.closeWithError(io.EOF) - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go b/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go deleted file mode 100644 index 257b064c5f..0000000000 --- a/vendor/github.com/containers/storage/pkg/ioutils/temp_unix.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build !windows - -package ioutils - -import "os" - -// TempDir on Unix systems is equivalent to os.MkdirTemp. -func TempDir(dir, prefix string) (string, error) { - return os.MkdirTemp(dir, prefix) -} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go b/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go deleted file mode 100644 index 79837fb33e..0000000000 --- a/vendor/github.com/containers/storage/pkg/ioutils/temp_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -//go:build windows - -package ioutils - -import ( - "os" - - "github.com/containers/storage/pkg/longpath" -) - -// TempDir is the equivalent of os.MkdirTemp, except that the result is in Windows longpath format. -func TempDir(dir, prefix string) (string, error) { - tempDir, err := os.MkdirTemp(dir, prefix) - if err != nil { - return "", err - } - return longpath.AddPrefix(tempDir), nil -} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/writeflusher.go b/vendor/github.com/containers/storage/pkg/ioutils/writeflusher.go deleted file mode 100644 index 52a4901ade..0000000000 --- a/vendor/github.com/containers/storage/pkg/ioutils/writeflusher.go +++ /dev/null @@ -1,92 +0,0 @@ -package ioutils - -import ( - "io" - "sync" -) - -// WriteFlusher wraps the Write and Flush operation ensuring that every write -// is a flush. In addition, the Close method can be called to intercept -// Read/Write calls if the targets lifecycle has already ended. -type WriteFlusher struct { - w io.Writer - flusher flusher - flushed chan struct{} - flushedOnce sync.Once - closed chan struct{} - closeLock sync.Mutex -} - -type flusher interface { - Flush() -} - -var errWriteFlusherClosed = io.EOF - -func (wf *WriteFlusher) Write(b []byte) (n int, err error) { - select { - case <-wf.closed: - return 0, errWriteFlusherClosed - default: - } - - n, err = wf.w.Write(b) - wf.Flush() // every write is a flush. - return n, err -} - -// Flush the stream immediately. -func (wf *WriteFlusher) Flush() { - select { - case <-wf.closed: - return - default: - } - - wf.flushedOnce.Do(func() { - close(wf.flushed) - }) - wf.flusher.Flush() -} - -// Flushed returns the state of flushed. -// If it's flushed, return true, or else it return false. -func (wf *WriteFlusher) Flushed() bool { - // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to - // be used to detect whether or a response code has been issued or not. - // Another hook should be used instead. - var flushed bool - select { - case <-wf.flushed: - flushed = true - default: - } - return flushed -} - -// Close closes the write flusher, disallowing any further writes to the -// target. After the flusher is closed, all calls to write or flush will -// result in an error. -func (wf *WriteFlusher) Close() error { - wf.closeLock.Lock() - defer wf.closeLock.Unlock() - - select { - case <-wf.closed: - return errWriteFlusherClosed - default: - close(wf.closed) - } - return nil -} - -// NewWriteFlusher returns a new WriteFlusher. 
-func NewWriteFlusher(w io.Writer) *WriteFlusher { - var fl flusher - if f, ok := w.(flusher); ok { - fl = f - } else { - fl = &NopFlusher{} - } - return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})} -} diff --git a/vendor/github.com/containers/storage/pkg/ioutils/writers.go b/vendor/github.com/containers/storage/pkg/ioutils/writers.go deleted file mode 100644 index 0b6d0a7a6d..0000000000 --- a/vendor/github.com/containers/storage/pkg/ioutils/writers.go +++ /dev/null @@ -1,66 +0,0 @@ -package ioutils - -import "io" - -// NopWriter represents a type which write operation is nop. -type NopWriter struct{} - -func (*NopWriter) Write(buf []byte) (int, error) { - return len(buf), nil -} - -type nopWriteCloser struct { - io.Writer -} - -func (w *nopWriteCloser) Close() error { return nil } - -// NopWriteCloser returns a nopWriteCloser. -func NopWriteCloser(w io.Writer) io.WriteCloser { - return &nopWriteCloser{w} -} - -// NopFlusher represents a type which flush operation is nop. -type NopFlusher struct{} - -// Flush is a nop operation. -func (f *NopFlusher) Flush() {} - -type writeCloserWrapper struct { - io.Writer - closer func() error -} - -func (r *writeCloserWrapper) Close() error { - return r.closer() -} - -// NewWriteCloserWrapper returns a new io.WriteCloser. -func NewWriteCloserWrapper(w io.Writer, closer func() error) io.WriteCloser { - return &writeCloserWrapper{ - Writer: w, - closer: closer, - } -} - -// WriteCounter wraps a concrete io.Writer and hold a count of the number -// of bytes written to the writer during a "session". -// This can be convenient when write return is masked -// (e.g., json.Encoder.Encode()) -type WriteCounter struct { - Count int64 - Writer io.Writer -} - -// NewWriteCounter returns a new WriteCounter. -func NewWriteCounter(w io.Writer) *WriteCounter { - return &WriteCounter{ - Writer: w, - } -} - -func (wc *WriteCounter) Write(p []byte) (count int, err error) { - count, err = wc.Writer.Write(p) - wc.Count += int64(count) - return -} diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lastwrite.go b/vendor/github.com/containers/storage/pkg/lockfile/lastwrite.go deleted file mode 100644 index 93fb1fea87..0000000000 --- a/vendor/github.com/containers/storage/pkg/lockfile/lastwrite.go +++ /dev/null @@ -1,82 +0,0 @@ -package lockfile - -import ( - "bytes" - cryptorand "crypto/rand" - "encoding/binary" - "os" - "sync/atomic" - "time" -) - -// LastWrite is an opaque identifier of the last write to some *LockFile. -// It can be used by users of a *LockFile to determine if the lock indicates changes -// since the last check. -// -// Never construct a LastWrite manually; only accept it from *LockFile methods, and pass it back. -type LastWrite struct { - // Never modify fields of a LastWrite object; it has value semantics. - state []byte // Contents of the lock file. -} - -var lastWriterIDCounter uint64 // Private state for newLastWriterID - -const lastWriterIDSize = 64 // This must be the same as len(stringid.GenerateRandomID) -// newLastWrite returns a new "last write" ID. -// The value must be different on every call, and also differ from values -// generated by other processes. -func newLastWrite() LastWrite { - // The ID is (PID, time, per-process counter, random) - // PID + time represents both a unique process across reboots, - // and a specific time within the process; the per-process counter - // is an extra safeguard for in-process concurrency. 
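Editorial note on the writers.go hunk above: as its doc comment says, `WriteCounter` is handy when the byte count of a write is masked by an encoder API. A short usage sketch of the removed helper, matching the signatures visible in the hunk:

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"

	"github.com/containers/storage/pkg/ioutils"
)

func main() {
	// json.Encoder.Encode reports only an error, not a byte count; the
	// counter wrapped around the destination recovers how much was written.
	wc := ioutils.NewWriteCounter(io.Discard)
	enc := json.NewEncoder(wc)
	if err := enc.Encode(map[string]int{"a": 1, "b": 2}); err != nil {
		panic(err)
	}
	fmt.Printf("encoder wrote %d bytes\n", wc.Count)
}
```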
- // The random part disambiguates across process namespaces - // (where PID values might collide), serves as a general-purpose - // extra safety, _and_ is used to pad the output to lastWriterIDSize, - // because other versions of this code exist and they don't work - // efficiently if the size of the value changes. - pid := os.Getpid() - tm := time.Now().UnixNano() - counter := atomic.AddUint64(&lastWriterIDCounter, 1) - - res := make([]byte, lastWriterIDSize) - binary.LittleEndian.PutUint64(res[0:8], uint64(tm)) - binary.LittleEndian.PutUint64(res[8:16], counter) - binary.LittleEndian.PutUint32(res[16:20], uint32(pid)) - if n, err := cryptorand.Read(res[20:lastWriterIDSize]); err != nil || n != lastWriterIDSize-20 { - panic(err) // This shouldn't happen - } - - return LastWrite{ - state: res, - } -} - -// serialize returns bytes to write to the lock file to represent the specified write. -func (lw LastWrite) serialize() []byte { - if lw.state == nil { - panic("LastWrite.serialize on an uninitialized object") - } - return lw.state -} - -// Equals returns true if lw matches other -func (lw LastWrite) equals(other LastWrite) bool { - if lw.state == nil { - panic("LastWrite.equals on an uninitialized object") - } - if other.state == nil { - panic("LastWrite.equals with an uninitialized counterparty") - } - return bytes.Equal(lw.state, other.state) -} - -// newLastWriteFromData returns a LastWrite corresponding to data that came from a previous LastWrite.serialize -func newLastWriteFromData(serialized []byte) LastWrite { - if serialized == nil { - panic("newLastWriteFromData with nil data") - } - return LastWrite{ - state: serialized, - } -} diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go deleted file mode 100644 index dfe81c2458..0000000000 --- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile.go +++ /dev/null @@ -1,447 +0,0 @@ -package lockfile - -import ( - "fmt" - "os" - "path/filepath" - "sync" - "time" - - "github.com/containers/storage/internal/rawfilelock" -) - -// A Locker represents a file lock where the file is used to cache an -// identifier of the last party that made changes to whatever's being protected -// by the lock. -// -// Deprecated: Refer directly to *LockFile, the provided implementation, instead. -type Locker interface { - // Acquire a writer lock. - // The default unix implementation panics if: - // - opening the lockfile failed - // - tried to lock a read-only lock-file - Lock() - - // Unlock the lock. - // The default unix implementation panics if: - // - unlocking an unlocked lock - // - if the lock counter is corrupted - Unlock() - - // Acquire a reader lock. - RLock() - - // Touch records, for others sharing the lock, that the caller was the - // last writer. It should only be called with the lock held. - // - // Deprecated: Use *LockFile.RecordWrite. - Touch() error - - // Modified() checks if the most recent writer was a party other than the - // last recorded writer. It should only be called with the lock held. - // Deprecated: Use *LockFile.ModifiedSince. - Modified() (bool, error) - - // TouchedSince() checks if the most recent writer modified the file (likely using Touch()) after the specified time. - TouchedSince(when time.Time) bool - - // IsReadWrite() checks if the lock file is read-write - IsReadWrite() bool - - // AssertLocked() can be used by callers that _know_ that they hold the lock (for reading or writing), for sanity checking. 
- // It might do nothing at all, or it may panic if the caller is not the owner of this lock. - AssertLocked() - - // AssertLockedForWriting() can be used by callers that _know_ that they hold the lock locked for writing, for sanity checking. - // It might do nothing at all, or it may panic if the caller is not the owner of this lock for writing. - AssertLockedForWriting() -} - -// LockFile represents a file lock where the file is used to cache an -// identifier of the last party that made changes to whatever's being protected -// by the lock. -// -// It MUST NOT be created manually. Use GetLockFile or GetROLockFile instead. -type LockFile struct { - // The following fields are only set when constructing *LockFile, and must never be modified afterwards. - // They are safe to access without any other locking. - file string - ro bool - - // rwMutex serializes concurrent reader-writer acquisitions in the same process space - rwMutex *sync.RWMutex - // stateMutex is used to synchronize concurrent accesses to the state below - stateMutex *sync.Mutex - counter int64 - lw LastWrite // A global value valid as of the last .Touch() or .Modified() - lockType rawfilelock.LockType - locked bool - // The following fields are only modified on transitions between counter == 0 / counter != 0. - // Thus, they can be safely accessed by users _that currently hold the LockFile_ without locking. - // In other cases, they need to be protected using stateMutex. - fd rawfilelock.FileHandle -} - -var ( - lockFiles map[string]*LockFile - lockFilesLock sync.Mutex -) - -// GetLockFile opens a read-write lock file, creating it if necessary. The -// *LockFile object may already be locked if the path has already been requested -// by the current process. -func GetLockFile(path string) (*LockFile, error) { - return getLockfile(path, false) -} - -// GetLockfile opens a read-write lock file, creating it if necessary. The -// Locker object may already be locked if the path has already been requested -// by the current process. -// -// Deprecated: Use GetLockFile -func GetLockfile(path string) (Locker, error) { - return GetLockFile(path) -} - -// GetROLockFile opens a read-only lock file, creating it if necessary. The -// *LockFile object may already be locked if the path has already been requested -// by the current process. -func GetROLockFile(path string) (*LockFile, error) { - return getLockfile(path, true) -} - -// GetROLockfile opens a read-only lock file, creating it if necessary. The -// Locker object may already be locked if the path has already been requested -// by the current process. -// -// Deprecated: Use GetROLockFile -func GetROLockfile(path string) (Locker, error) { - return GetROLockFile(path) -} - -// Lock locks the lockfile as a writer. Panic if the lock is a read-only one. -func (l *LockFile) Lock() { - if l.ro { - panic("can't take write lock on read-only lock file") - } - l.lock(rawfilelock.WriteLock) -} - -// RLock locks the lockfile as a reader. -func (l *LockFile) RLock() { - l.lock(rawfilelock.ReadLock) -} - -// TryLock attempts to lock the lockfile as a writer. Panic if the lock is a read-only one. -func (l *LockFile) TryLock() error { - if l.ro { - panic("can't take write lock on read-only lock file") - } - return l.tryLock(rawfilelock.WriteLock) -} - -// TryRLock attempts to lock the lockfile as a reader. -func (l *LockFile) TryRLock() error { - return l.tryLock(rawfilelock.ReadLock) -} - -// Unlock unlocks the lockfile. 
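Editorial note on the lockfile hunk above: `GetLockFile` returns a per-path singleton within the process, and `Lock`/`RLock`/`Unlock` serialize access both across goroutines (via the embedded rwMutex) and, on most platforms, across processes (via the file lock). A minimal usage sketch against the removed API (the lock path is illustrative):

```go
package main

import (
	"fmt"

	"github.com/containers/storage/pkg/lockfile"
)

func main() {
	// The same *LockFile is returned for repeated requests of one path, so
	// in-process callers contend on the rwMutex before the file lock is taken.
	lk, err := lockfile.GetLockFile("/tmp/example.lock")
	if err != nil {
		panic(err)
	}
	lk.Lock() // write lock; panics if the lock file was opened read-only
	defer lk.Unlock()

	fmt.Println("holding the write lock")
}
```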
-func (l *LockFile) Unlock() { - l.stateMutex.Lock() - if !l.locked { - // Panic when unlocking an unlocked lock. That's a violation - // of the lock semantics and will reveal such. - panic("calling Unlock on unlocked lock") - } - l.counter-- - if l.counter < 0 { - // Panic when the counter is negative. There is no way we can - // recover from a corrupted lock and we need to protect the - // storage from corruption. - panic(fmt.Sprintf("lock %q has been unlocked too often", l.file)) - } - if l.counter == 0 { - // We should only release the lock when the counter is 0 to - // avoid releasing read-locks too early; a given process may - // acquire a read lock multiple times. - l.locked = false - // Close the file descriptor on the last unlock, releasing the - // file lock. - rawfilelock.UnlockAndCloseHandle(l.fd) - } - if l.lockType == rawfilelock.ReadLock { - l.rwMutex.RUnlock() - } else { - l.rwMutex.Unlock() - } - l.stateMutex.Unlock() -} - -func (l *LockFile) AssertLocked() { - // DO NOT provide a variant that returns the value of l.locked. - // - // If the caller does not hold the lock, l.locked might nevertheless be true because another goroutine does hold it, and - // we can’t tell the difference. - // - // Hence, this “AssertLocked” method, which exists only for sanity checks. - - // Don’t even bother with l.stateMutex: The caller is expected to hold the lock, and in that case l.locked is constant true - // with no possible writers. - // If the caller does not hold the lock, we are violating the locking/memory model anyway, and accessing the data - // without the lock is more efficient for callers, and potentially more visible to lock analysers for incorrect callers. - if !l.locked { - panic("internal error: lock is not held by the expected owner") - } -} - -func (l *LockFile) AssertLockedForWriting() { - // DO NOT provide a variant that returns the current lock state. - // - // The same caveats as for AssertLocked apply equally. - - l.AssertLocked() - // Like AssertLocked, don’t even bother with l.stateMutex. - if l.lockType == rawfilelock.ReadLock { - panic("internal error: lock is not held for writing") - } -} - -// ModifiedSince checks if the lock has been changed since a provided LastWrite value, -// and returns the one to record instead. -// -// If ModifiedSince reports no modification, the previous LastWrite value -// is still valid and can continue to be used. -// -// If this function fails, the LastWriter value of the lock is indeterminate; -// the caller should fail and keep using the previously-recorded LastWrite value, -// so that it continues failing until the situation is resolved. Similarly, -// it should only update the recorded LastWrite value after processing the update: -// -// lw2, modified, err := state.lock.ModifiedSince(state.lastWrite) -// if err != nil { /* fail */ } -// state.lastWrite = lw2 -// if modified { -// if err := reload(); err != nil { /* fail */ } -// state.lastWrite = lw2 -// } -// -// The caller must hold the lock (for reading or writing). -func (l *LockFile) ModifiedSince(previous LastWrite) (LastWrite, bool, error) { - l.AssertLocked() - currentLW, err := l.GetLastWrite() - if err != nil { - return LastWrite{}, false, err - } - modified := !previous.equals(currentLW) - return currentLW, modified, nil -} - -// Modified indicates if the lockfile has been updated since the last time it -// was loaded. -// NOTE: Unlike ModifiedSince, this returns true the first time it is called on a *LockFile. 
-// Callers cannot, in general, rely on this, because that might have happened for some other -// owner of the same *LockFile who created it previously. -// -// Deprecated: Use *LockFile.ModifiedSince. -func (l *LockFile) Modified() (bool, error) { - l.stateMutex.Lock() - if !l.locked { - panic("attempted to check last-writer in lockfile without locking it first") - } - defer l.stateMutex.Unlock() - oldLW := l.lw - // Note that this is called with stateMutex held; that’s fine because ModifiedSince doesn’t need to lock it. - currentLW, modified, err := l.ModifiedSince(oldLW) - if err != nil { - return true, err - } - l.lw = currentLW - return modified, nil -} - -// Touch updates the lock file with to record that the current lock holder has modified the lock-protected data. -// -// Deprecated: Use *LockFile.RecordWrite. -func (l *LockFile) Touch() error { - lw, err := l.RecordWrite() - if err != nil { - return err - } - l.stateMutex.Lock() - if !l.locked || (l.lockType == rawfilelock.ReadLock) { - panic("attempted to update last-writer in lockfile without the write lock") - } - defer l.stateMutex.Unlock() - l.lw = lw - return nil -} - -// IsReadWrite indicates if the lock file is a read-write lock. -func (l *LockFile) IsReadWrite() bool { - return !l.ro -} - -// getLockFile returns a *LockFile object, possibly (depending on the platform) -// working inter-process, and associated with the specified path. -// -// If ro, the lock is a read-write lock and the returned *LockFile should correspond to the -// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock, -// or a read-write lock and *LockFile should correspond to the “lock for writing” (exclusive) operation. -// -// WARNING: -// - The lock may or MAY NOT be inter-process. -// - There may or MAY NOT be an actual object on the filesystem created for the specified path. -// - Even if ro, the lock MAY be exclusive. -func getLockfile(path string, ro bool) (*LockFile, error) { - lockFilesLock.Lock() - defer lockFilesLock.Unlock() - if lockFiles == nil { - lockFiles = make(map[string]*LockFile) - } - cleanPath, err := filepath.Abs(path) - if err != nil { - return nil, fmt.Errorf("ensuring that path %q is an absolute path: %w", path, err) - } - if lockFile, ok := lockFiles[cleanPath]; ok { - if ro && lockFile.IsReadWrite() { - return nil, fmt.Errorf("lock %q is not a read-only lock", cleanPath) - } - if !ro && !lockFile.IsReadWrite() { - return nil, fmt.Errorf("lock %q is not a read-write lock", cleanPath) - } - return lockFile, nil - } - lockFile, err := createLockFileForPath(cleanPath, ro) // platform-dependent LockFile - if err != nil { - return nil, err - } - lockFiles[cleanPath] = lockFile - return lockFile, nil -} - -// openLock opens a lock file at the specified path, creating the parent directory if it does not exist. -func openLock(path string, readOnly bool) (rawfilelock.FileHandle, error) { - fd, err := rawfilelock.OpenLock(path, readOnly) - if err == nil { - return fd, nil - } - - // the directory of the lockfile seems to be removed, try to create it - if os.IsNotExist(err) { - if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil { - return fd, fmt.Errorf("creating lock file directory: %w", err) - } - - return openLock(path, readOnly) - } - return fd, &os.PathError{Op: "open", Path: path, Err: err} -} - -// createLockFileForPath returns new *LockFile object, possibly (depending on the platform) -// working inter-process and associated with the specified path. 
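Editorial note: the `ModifiedSince` doc comment above amounts to a consumer-side reload loop — remember the `LastWrite` from the previous check, reload only when another party wrote, and advance the recorded value only after a successful reload. A sketch of that contract under stated assumptions (the `state` type and `refresh` helper are hypothetical; this variant deliberately keeps the old `lastWrite` on a failed reload so the next call retries):

```go
package example

import (
	"github.com/containers/storage/pkg/lockfile"
)

type state struct {
	lock      *lockfile.LockFile
	lastWrite lockfile.LastWrite
}

// refresh follows the ModifiedSince contract quoted above: reload only when
// another party wrote, and only advance lastWrite after a successful reload.
func (s *state) refresh(reload func() error) error {
	s.lock.RLock()
	defer s.lock.Unlock() // Unlock handles both read and write acquisitions

	lw, modified, err := s.lock.ModifiedSince(s.lastWrite)
	if err != nil {
		return err
	}
	if !modified {
		s.lastWrite = lw
		return nil
	}
	if err := reload(); err != nil {
		// Keep the previous lastWrite so the next refresh retries the reload.
		return err
	}
	s.lastWrite = lw
	return nil
}
```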
-// -// This function will be called at most once for each path value within a single process. -// -// If ro, the lock is a read-write lock and the returned *LockFile should correspond to the -// “lock for reading” (shared) operation; otherwise, the lock is either an exclusive lock, -// or a read-write lock and *LockFile should correspond to the “lock for writing” (exclusive) operation. -// -// WARNING: -// - The lock may or MAY NOT be inter-process. -// - There may or MAY NOT be an actual object on the filesystem created for the specified path. -// - Even if ro, the lock MAY be exclusive. -func createLockFileForPath(path string, ro bool) (*LockFile, error) { - // Check if we can open the lock. - fd, err := openLock(path, ro) - if err != nil { - return nil, err - } - rawfilelock.UnlockAndCloseHandle(fd) - - lType := rawfilelock.WriteLock - if ro { - lType = rawfilelock.ReadLock - } - - return &LockFile{ - file: path, - ro: ro, - - rwMutex: &sync.RWMutex{}, - stateMutex: &sync.Mutex{}, - lw: newLastWrite(), // For compatibility, the first call of .Modified() will always report a change. - lockType: lType, - locked: false, - }, nil -} - -// lock locks the lockfile via syscall based on the specified type and -// command. -func (l *LockFile) lock(lType rawfilelock.LockType) { - if lType == rawfilelock.ReadLock { - l.rwMutex.RLock() - } else { - l.rwMutex.Lock() - } - l.stateMutex.Lock() - defer l.stateMutex.Unlock() - if l.counter == 0 { - // If we're the first reference on the lock, we need to open the file again. - fd, err := openLock(l.file, l.ro) - if err != nil { - panic(err) - } - l.fd = fd - - // Optimization: only use the (expensive) syscall when - // the counter is 0. In this case, we're either the first - // reader lock or a writer lock. - if err := rawfilelock.LockFile(l.fd, lType); err != nil { - panic(err) - } - } - l.lockType = lType - l.locked = true - l.counter++ -} - -// lock locks the lockfile via syscall based on the specified type and -// command. -func (l *LockFile) tryLock(lType rawfilelock.LockType) error { - var success bool - var rwMutexUnlocker func() - if lType == rawfilelock.ReadLock { - success = l.rwMutex.TryRLock() - rwMutexUnlocker = l.rwMutex.RUnlock - } else { - success = l.rwMutex.TryLock() - rwMutexUnlocker = l.rwMutex.Unlock - } - if !success { - return fmt.Errorf("resource temporarily unavailable") - } - l.stateMutex.Lock() - defer l.stateMutex.Unlock() - if l.counter == 0 { - // If we're the first reference on the lock, we need to open the file again. - fd, err := openLock(l.file, l.ro) - if err != nil { - rwMutexUnlocker() - return err - } - l.fd = fd - - // Optimization: only use the (expensive) syscall when - // the counter is 0. In this case, we're either the first - // reader lock or a writer lock. - if err = rawfilelock.TryLockFile(l.fd, lType); err != nil { - rawfilelock.CloseHandle(fd) - rwMutexUnlocker() - return err - } - } - l.lockType = lType - l.locked = true - l.counter++ - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go deleted file mode 100644 index 14c27c51fb..0000000000 --- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_unix.go +++ /dev/null @@ -1,66 +0,0 @@ -//go:build !windows - -package lockfile - -import ( - "time" - - "github.com/containers/storage/pkg/system" - "golang.org/x/sys/unix" -) - -// GetLastWrite returns a LastWrite value corresponding to current state of the lock. 
-// This is typically called before (_not after_) loading the state when initializing a consumer -// of the data protected by the lock. -// During the lifetime of the consumer, the consumer should usually call ModifiedSince instead. -// -// The caller must hold the lock (for reading or writing). -func (l *LockFile) GetLastWrite() (LastWrite, error) { - l.AssertLocked() - contents := make([]byte, lastWriterIDSize) - n, err := unix.Pread(int(l.fd), contents, 0) - if err != nil { - return LastWrite{}, err - } - // It is important to handle the partial read case, because - // the initial size of the lock file is zero, which is a valid - // state (no writes yet) - contents = contents[:n] - return newLastWriteFromData(contents), nil -} - -// RecordWrite updates the lock with a new LastWrite value, and returns the new value. -// -// If this function fails, the LastWriter value of the lock is indeterminate; -// the caller should keep using the previously-recorded LastWrite value, -// and possibly detecting its own modification as an external one: -// -// lw, err := state.lock.RecordWrite() -// if err != nil { /* fail */ } -// state.lastWrite = lw -// -// The caller must hold the lock for writing. -func (l *LockFile) RecordWrite() (LastWrite, error) { - l.AssertLockedForWriting() - lw := newLastWrite() - lockContents := lw.serialize() - n, err := unix.Pwrite(int(l.fd), lockContents, 0) - if err != nil { - return LastWrite{}, err - } - if n != len(lockContents) { - return LastWrite{}, unix.ENOSPC - } - return lw, nil -} - -// TouchedSince indicates if the lock file has been touched since the specified time -func (l *LockFile) TouchedSince(when time.Time) bool { - st, err := system.Fstat(int(l.fd)) - if err != nil { - return true - } - mtim := st.Mtim() - touched := time.Unix(mtim.Unix()) - return when.Before(touched) -} diff --git a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go b/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go deleted file mode 100644 index e66f7bfbbc..0000000000 --- a/vendor/github.com/containers/storage/pkg/lockfile/lockfile_windows.go +++ /dev/null @@ -1,73 +0,0 @@ -//go:build windows - -package lockfile - -import ( - "os" - "time" - - "golang.org/x/sys/windows" -) - -const ( - reserved = 0 - allBytes = ^uint32(0) -) - -// GetLastWrite returns a LastWrite value corresponding to current state of the lock. -// This is typically called before (_not after_) loading the state when initializing a consumer -// of the data protected by the lock. -// During the lifetime of the consumer, the consumer should usually call ModifiedSince instead. -// -// The caller must hold the lock (for reading or writing) before this function is called. -func (l *LockFile) GetLastWrite() (LastWrite, error) { - l.AssertLocked() - contents := make([]byte, lastWriterIDSize) - ol := new(windows.Overlapped) - var n uint32 - err := windows.ReadFile(windows.Handle(l.fd), contents, &n, ol) - if err != nil && err != windows.ERROR_HANDLE_EOF { - return LastWrite{}, err - } - // It is important to handle the partial read case, because - // the initial size of the lock file is zero, which is a valid - // state (no writes yet) - contents = contents[:n] - return newLastWriteFromData(contents), nil -} - -// RecordWrite updates the lock with a new LastWrite value, and returns the new value. 
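Editorial note on the Unix implementation above: `GetLastWrite` and `RecordWrite` use positional I/O (`unix.Pread`/`unix.Pwrite` at offset 0) so the shared lock-file descriptor never races on a file cursor, and a short read is treated as valid because a fresh lock file is empty. A minimal sketch of the read side under the same assumptions (the `readState` helper is hypothetical):

```go
//go:build !windows

package example

import (
	"golang.org/x/sys/unix"
)

// readState mirrors the technique above: pread at offset 0 leaves the file
// offset untouched, so concurrent holders of the same fd do not interfere.
// A short read (n < size) is expected for a lock file with no writes yet.
func readState(fd int, size int) ([]byte, error) {
	buf := make([]byte, size)
	n, err := unix.Pread(fd, buf, 0)
	if err != nil {
		return nil, err
	}
	return buf[:n], nil
}
```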
-// -// If this function fails, the LastWriter value of the lock is indeterminate; -// the caller should keep using the previously-recorded LastWrite value, -// and possibly detecting its own modification as an external one: -// -// lw, err := state.lock.RecordWrite() -// if err != nil { /* fail */ } -// state.lastWrite = lw -// -// The caller must hold the lock for writing. -func (l *LockFile) RecordWrite() (LastWrite, error) { - l.AssertLockedForWriting() - lw := newLastWrite() - lockContents := lw.serialize() - ol := new(windows.Overlapped) - var n uint32 - err := windows.WriteFile(windows.Handle(l.fd), lockContents, &n, ol) - if err != nil { - return LastWrite{}, err - } - if int(n) != len(lockContents) { - return LastWrite{}, windows.ERROR_DISK_FULL - } - return lw, nil -} - -// TouchedSince indicates if the lock file has been touched since the specified time -func (l *LockFile) TouchedSince(when time.Time) bool { - stat, err := os.Stat(l.file) - if err != nil { - return true - } - return when.Before(stat.ModTime()) -} diff --git a/vendor/github.com/containers/storage/pkg/longpath/longpath.go b/vendor/github.com/containers/storage/pkg/longpath/longpath.go deleted file mode 100644 index 9b15bfff4c..0000000000 --- a/vendor/github.com/containers/storage/pkg/longpath/longpath.go +++ /dev/null @@ -1,26 +0,0 @@ -// longpath introduces some constants and helper functions for handling long paths -// in Windows, which are expected to be prepended with `\\?\` and followed by either -// a drive letter, a UNC server\share, or a volume identifier. - -package longpath - -import ( - "strings" -) - -// Prefix is the longpath prefix for Windows file paths. -const Prefix = `\\?\` - -// AddPrefix will add the Windows long path prefix to the path provided if -// it does not already have it. -func AddPrefix(path string) string { - if !strings.HasPrefix(path, Prefix) { - if strings.HasPrefix(path, `\\`) { - // This is a UNC path, so we need to add 'UNC' to the path as well. 
- path = Prefix + `UNC` + path[1:] - } else { - path = Prefix + path - } - } - return path -} diff --git a/vendor/github.com/containers/storage/pkg/mount/flags.go b/vendor/github.com/containers/storage/pkg/mount/flags.go deleted file mode 100644 index 40a229932b..0000000000 --- a/vendor/github.com/containers/storage/pkg/mount/flags.go +++ /dev/null @@ -1,149 +0,0 @@ -package mount - -import ( - "fmt" - "strings" -) - -var flags = map[string]struct { - clear bool - flag int -}{ - "defaults": {false, 0}, - "ro": {false, RDONLY}, - "rw": {true, RDONLY}, - "suid": {true, NOSUID}, - "nosuid": {false, NOSUID}, - "dev": {true, NODEV}, - "nodev": {false, NODEV}, - "exec": {true, NOEXEC}, - "noexec": {false, NOEXEC}, - "sync": {false, SYNCHRONOUS}, - "async": {true, SYNCHRONOUS}, - "dirsync": {false, DIRSYNC}, - "remount": {false, REMOUNT}, - "mand": {false, MANDLOCK}, - "nomand": {true, MANDLOCK}, - "atime": {true, NOATIME}, - "noatime": {false, NOATIME}, - "diratime": {true, NODIRATIME}, - "nodiratime": {false, NODIRATIME}, - "bind": {false, BIND}, - "rbind": {false, RBIND}, - "unbindable": {false, UNBINDABLE}, - "runbindable": {false, RUNBINDABLE}, - "private": {false, PRIVATE}, - "rprivate": {false, RPRIVATE}, - "shared": {false, SHARED}, - "rshared": {false, RSHARED}, - "slave": {false, SLAVE}, - "rslave": {false, RSLAVE}, - "relatime": {false, RELATIME}, - "norelatime": {true, RELATIME}, - "strictatime": {false, STRICTATIME}, - "nostrictatime": {true, STRICTATIME}, -} - -var validFlags = map[string]bool{ - "": true, - "size": true, - "mode": true, - "uid": true, - "gid": true, - "nr_inodes": true, - "nr_blocks": true, - "mpol": true, -} - -var propagationFlags = map[string]bool{ - "bind": true, - "rbind": true, - "unbindable": true, - "runbindable": true, - "private": true, - "rprivate": true, - "shared": true, - "rshared": true, - "slave": true, - "rslave": true, -} - -// MergeTmpfsOptions merge mount options to make sure there is no duplicate. -func MergeTmpfsOptions(options []string) ([]string, error) { - // We use collisions maps to remove duplicates. - // For flag, the key is the flag value (the key for propagation flag is -1) - // For data=value, the key is the data - flagCollisions := map[int]bool{} - dataCollisions := map[string]bool{} - - var newOptions []string - // We process in reverse order - for i := len(options) - 1; i >= 0; i-- { - option := options[i] - if option == "defaults" { - continue - } - if f, ok := flags[option]; ok && f.flag != 0 { - // There is only one propagation mode - key := f.flag - if propagationFlags[option] { - key = -1 - } - // Check to see if there is collision for flag - if !flagCollisions[key] { - // We prepend the option and add to collision map - newOptions = append([]string{option}, newOptions...) - flagCollisions[key] = true - } - continue - } - opt, _, ok := strings.Cut(option, "=") - if !ok || !validFlags[opt] { - return nil, fmt.Errorf("invalid tmpfs option %q", opt) - } - if !dataCollisions[opt] { - // We prepend the option and add to collision map - newOptions = append([]string{option}, newOptions...) 
- dataCollisions[opt] = true - } - } - - return newOptions, nil -} - -// ParseOptions parses fstab type mount options into mount() flags -// and device specific data -func ParseOptions(options string) (int, string) { - var ( - flag int - data []string - ) - - for _, o := range strings.Split(options, ",") { - // If the option does not exist in the flags table or the flag - // is not supported on the platform, - // then it is a data value for a specific fs type - if f, exists := flags[o]; exists && f.flag != 0 { - if f.clear { - flag &= ^f.flag - } else { - flag |= f.flag - } - } else { - data = append(data, o) - } - } - return flag, strings.Join(data, ",") -} - -// ParseTmpfsOptions parse fstab type mount options into flags and data -func ParseTmpfsOptions(options string) (int, string, error) { - flags, data := ParseOptions(options) - for _, o := range strings.Split(data, ",") { - opt, _, _ := strings.Cut(o, "=") - if !validFlags[opt] { - return 0, "", fmt.Errorf("invalid tmpfs option %q", opt) - } - } - return flags, data, nil -} diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go deleted file mode 100644 index 3ba99cf935..0000000000 --- a/vendor/github.com/containers/storage/pkg/mount/flags_freebsd.go +++ /dev/null @@ -1,48 +0,0 @@ -package mount - -import ( - "golang.org/x/sys/unix" -) - -const ( - // RDONLY will mount the file system read-only. - RDONLY = unix.MNT_RDONLY - - // NOSUID will not allow set-user-identifier or set-group-identifier bits to - // take effect. - NOSUID = unix.MNT_NOSUID - - // NOEXEC will not allow execution of any binaries on the mounted file system. - NOEXEC = unix.MNT_NOEXEC - - // SYNCHRONOUS will allow I/O to the file system to be done synchronously. - SYNCHRONOUS = unix.MNT_SYNCHRONOUS - - // REMOUNT will attempt to remount an already-mounted file system. This is - // commonly used to change the mount flags for a file system, especially to - // make a readonly file system writeable. It does not change device or mount - // point. - REMOUNT = unix.MNT_UPDATE - - // NOATIME will not update the file access time when reading from a file. - NOATIME = unix.MNT_NOATIME - - mntDetach = unix.MNT_FORCE - - NODIRATIME = 0 - NODEV = 0 - DIRSYNC = 0 - MANDLOCK = 0 - BIND = 0 - RBIND = 0 - UNBINDABLE = 0 - RUNBINDABLE = 0 - PRIVATE = 0 - RPRIVATE = 0 - SLAVE = 0 - RSLAVE = 0 - SHARED = 0 - RSHARED = 0 - RELATIME = 0 - STRICTATIME = 0 -) diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_linux.go b/vendor/github.com/containers/storage/pkg/mount/flags_linux.go deleted file mode 100644 index 0425d0dd63..0000000000 --- a/vendor/github.com/containers/storage/pkg/mount/flags_linux.go +++ /dev/null @@ -1,87 +0,0 @@ -package mount - -import ( - "golang.org/x/sys/unix" -) - -const ( - // RDONLY will mount the file system read-only. - RDONLY = unix.MS_RDONLY - - // NOSUID will not allow set-user-identifier or set-group-identifier bits to - // take effect. - NOSUID = unix.MS_NOSUID - - // NODEV will not interpret character or block special devices on the file - // system. - NODEV = unix.MS_NODEV - - // NOEXEC will not allow execution of any binaries on the mounted file system. - NOEXEC = unix.MS_NOEXEC - - // SYNCHRONOUS will allow I/O to the file system to be done synchronously. - SYNCHRONOUS = unix.MS_SYNCHRONOUS - - // DIRSYNC will force all directory updates within the file system to be done - // synchronously. 
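Editorial note on the flags.go hunk above: `ParseOptions` splits an fstab-style option string into a mount(2) flag word plus leftover filesystem-specific data, using the flag table to decide which side each option lands on. A usage sketch against the removed API (option string is illustrative; the numeric flag values are platform-dependent):

```go
package main

import (
	"fmt"

	"github.com/containers/storage/pkg/mount"
)

func main() {
	// Options known to the flag table ("ro", "noatime") become bits in the
	// flag word; everything else ("size=64m") passes through as fs data.
	flags, data := mount.ParseOptions("ro,noatime,size=64m")
	fmt.Printf("flags=%#x data=%q\n", flags, data)
}
```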
This affects the following system calls: create, link, - // unlink, symlink, mkdir, rmdir, mknod and rename. - DIRSYNC = unix.MS_DIRSYNC - - // REMOUNT will attempt to remount an already-mounted file system. This is - // commonly used to change the mount flags for a file system, especially to - // make a readonly file system writeable. It does not change device or mount - // point. - REMOUNT = unix.MS_REMOUNT - - // MANDLOCK will force mandatory locks on a filesystem. - MANDLOCK = unix.MS_MANDLOCK - - // NOATIME will not update the file access time when reading from a file. - NOATIME = unix.MS_NOATIME - - // NODIRATIME will not update the directory access time. - NODIRATIME = unix.MS_NODIRATIME - - // BIND remounts a subtree somewhere else. - BIND = unix.MS_BIND - - // RBIND remounts a subtree and all possible submounts somewhere else. - RBIND = unix.MS_BIND | unix.MS_REC - - // UNBINDABLE creates a mount which cannot be cloned through a bind operation. - UNBINDABLE = unix.MS_UNBINDABLE - - // RUNBINDABLE marks the entire mount tree as UNBINDABLE. - RUNBINDABLE = unix.MS_UNBINDABLE | unix.MS_REC - - // PRIVATE creates a mount which carries no propagation abilities. - PRIVATE = unix.MS_PRIVATE - - // RPRIVATE marks the entire mount tree as PRIVATE. - RPRIVATE = unix.MS_PRIVATE | unix.MS_REC - - // SLAVE creates a mount which receives propagation from its master, but not - // vice versa. - SLAVE = unix.MS_SLAVE - - // RSLAVE marks the entire mount tree as SLAVE. - RSLAVE = unix.MS_SLAVE | unix.MS_REC - - // SHARED creates a mount which provides the ability to create mirrors of - // that mount such that mounts and unmounts within any of the mirrors - // propagate to the other mirrors. - SHARED = unix.MS_SHARED - - // RSHARED marks the entire mount tree as SHARED. - RSHARED = unix.MS_SHARED | unix.MS_REC - - // RELATIME updates inode access times relative to modify or change time. - RELATIME = unix.MS_RELATIME - - // STRICTATIME allows to explicitly request full atime updates. This makes - // it possible for the kernel to default to relatime or noatime but still - // allow userspace to override it. - STRICTATIME = unix.MS_STRICTATIME - - mntDetach = unix.MNT_DETACH -) diff --git a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go deleted file mode 100644 index e581d64eb9..0000000000 --- a/vendor/github.com/containers/storage/pkg/mount/flags_unsupported.go +++ /dev/null @@ -1,31 +0,0 @@ -//go:build !linux && !freebsd - -package mount - -// These flags are unsupported. 
-const ( - BIND = 0 - DIRSYNC = 0 - MANDLOCK = 0 - NOATIME = 0 - NODEV = 0 - NODIRATIME = 0 - NOEXEC = 0 - NOSUID = 0 - UNBINDABLE = 0 - RUNBINDABLE = 0 - PRIVATE = 0 - RPRIVATE = 0 - SHARED = 0 - RSHARED = 0 - SLAVE = 0 - RSLAVE = 0 - RBIND = 0 - RELATIME = 0 - RELATIVE = 0 - REMOUNT = 0 - STRICTATIME = 0 - SYNCHRONOUS = 0 - RDONLY = 0 - mntDetach = 0 -) diff --git a/vendor/github.com/containers/storage/pkg/mount/mount.go b/vendor/github.com/containers/storage/pkg/mount/mount.go deleted file mode 100644 index 23c5c44ac0..0000000000 --- a/vendor/github.com/containers/storage/pkg/mount/mount.go +++ /dev/null @@ -1,110 +0,0 @@ -package mount - -import ( - "sort" - "strconv" - "strings" -) - -// mountError holds an error from a mount or unmount operation -type mountError struct { - op string - source, target string - flags uintptr - data string - err error -} - -// Error returns a string representation of mountError -func (e *mountError) Error() string { - out := e.op + " " - - if e.source != "" { - out += e.source + ":" + e.target - } else { - out += e.target - } - - if e.flags != uintptr(0) { - out += ", flags: 0x" + strconv.FormatUint(uint64(e.flags), 16) - } - if e.data != "" { - out += ", data: " + e.data - } - - out += ": " + e.err.Error() - return out -} - -// Cause returns the underlying cause of the error -func (e *mountError) Cause() error { - return e.err -} - -// Unwrap returns the underlying cause of the error -func (e *mountError) Unwrap() error { - return e.err -} - -// Mount will mount filesystem according to the specified configuration, on the -// condition that the target path is *not* already mounted. Options must be -// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See -// flags.go for supported option flags. -func Mount(device, target, mType, options string) error { - flag, data := ParseOptions(options) - if flag&REMOUNT != REMOUNT { - if mounted, err := Mounted(target); err != nil || mounted { - return err - } - } - return mount(device, target, mType, uintptr(flag), data) -} - -// ForceMount will mount a filesystem according to the specified configuration, -// *regardless* if the target path is not already mounted. Options must be -// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See -// flags.go for supported option flags. -func ForceMount(device, target, mType, options string) error { - flag, data := ParseOptions(options) - return mount(device, target, mType, uintptr(flag), data) -} - -// Unmount lazily unmounts a filesystem on supported platforms, otherwise -// does a normal unmount. -func Unmount(target string) error { - return unmount(target, mntDetach) -} - -// RecursiveUnmount unmounts the target and all mounts underneath, starting with -// the deepest mount first. -func RecursiveUnmount(target string) error { - mounts, err := GetMounts() - if err != nil { - return err - } - - // Make the deepest mount be first - sort.Slice(mounts, func(i, j int) bool { - return len(mounts[i].Mountpoint) > len(mounts[j].Mountpoint) - }) - - for i, m := range mounts { - if !strings.HasPrefix(m.Mountpoint, target) { - continue - } - if err := Unmount(m.Mountpoint); err != nil && i == len(mounts)-1 { - return err - // Ignore errors for submounts and continue trying to unmount others - // The final unmount should fail if there are any submounts remaining - } - } - return nil -} - -// ForceUnmount lazily unmounts a filesystem on supported platforms, -// otherwise does a normal unmount. 
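Editorial note on the mount.go hunk above: `Mount` skips already-mounted targets unless `REMOUNT` is requested, and `RecursiveUnmount` sorts by mountpoint length so submounts are always torn down before their parents. A usage sketch against the removed API (paths are illustrative; mounting requires Linux and sufficient privileges):

```go
package main

import (
	"log"

	"github.com/containers/storage/pkg/mount"
)

func main() {
	// Mount is a no-op if the target is already mounted (no REMOUNT flag
	// here); RecursiveUnmount then unwinds the tree deepest-first.
	if err := mount.Mount("tmpfs", "/mnt/scratch", "tmpfs", "size=16m"); err != nil {
		log.Fatal(err)
	}
	if err := mount.RecursiveUnmount("/mnt/scratch"); err != nil {
		log.Fatal(err)
	}
}
```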
-// -// Deprecated: please use Unmount instead, it is identical. -func ForceUnmount(target string) error { - return unmount(target, mntDetach) -} diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go b/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go deleted file mode 100644 index 61d6d1c595..0000000000 --- a/vendor/github.com/containers/storage/pkg/mount/mounter_freebsd.go +++ /dev/null @@ -1,63 +0,0 @@ -//go:build freebsd && cgo - -package mount - -/* -#include -#include -#include -#include -#include -#include -*/ -import "C" - -import ( - "fmt" - "strings" - "unsafe" -) - -func allocateIOVecs(options []string) []C.struct_iovec { - out := make([]C.struct_iovec, len(options)) - for i, option := range options { - out[i].iov_base = unsafe.Pointer(C.CString(option)) - out[i].iov_len = C.size_t(len(option) + 1) - } - return out -} - -func mount(device, target, mType string, flag uintptr, data string) error { - isNullFS := false - - options := []string{"fspath", target} - - if data != "" { - xs := strings.Split(data, ",") - for _, x := range xs { - if x == "bind" { - isNullFS = true - continue - } - name, val, _ := strings.Cut(x, "=") - options = append(options, name) - options = append(options, val) - } - } - - if isNullFS { - options = append(options, "fstype", "nullfs", "target", device) - } else { - options = append(options, "fstype", mType, "from", device) - } - rawOptions := allocateIOVecs(options) - for _, rawOption := range rawOptions { - defer C.free(rawOption.iov_base) - } - - if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 { - reason := C.GoString(C.strerror(*C.__error())) - return fmt.Errorf("failed to call nmount: %s", reason) - } - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go b/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go deleted file mode 100644 index 594cd0881a..0000000000 --- a/vendor/github.com/containers/storage/pkg/mount/mounter_linux.go +++ /dev/null @@ -1,74 +0,0 @@ -package mount - -import ( - "golang.org/x/sys/unix" -) - -const ( - // ptypes is the set propagation types. - ptypes = unix.MS_SHARED | unix.MS_PRIVATE | unix.MS_SLAVE | unix.MS_UNBINDABLE - - // pflags is the full set valid flags for a change propagation call. - pflags = ptypes | unix.MS_REC | unix.MS_SILENT - - // broflags is the combination of bind and read only - broflags = unix.MS_BIND | unix.MS_RDONLY - - none = "none" -) - -// isremount returns true if either device name or flags identify a remount request, false otherwise. -func isremount(device string, flags uintptr) bool { - switch { - // We treat device "" and "none" as a remount request to provide compatibility with - // requests that don't explicitly set MS_REMOUNT such as those manipulating bind mounts. - case flags&unix.MS_REMOUNT != 0, device == "", device == none: - return true - default: - return false - } -} - -func mount(device, target, mType string, flags uintptr, data string) error { - oflags := flags &^ ptypes - if !isremount(device, flags) || data != "" { - // Initial call applying all non-propagation flags for mount - // or remount with changed data - if err := unix.Mount(device, target, mType, oflags, data); err != nil { - return &mountError{ - op: "mount", - source: device, - target: target, - flags: oflags, - data: data, - err: err, - } - } - } - - if flags&ptypes != 0 { - // Change the propagation type. 
- if err := unix.Mount("", target, "", flags&pflags, ""); err != nil { - return &mountError{ - op: "remount", - target: target, - flags: flags & pflags, - err: err, - } - } - } - - if oflags&broflags == broflags { - // Remount the bind to apply read only. - if err := unix.Mount("", target, "", oflags|unix.MS_REMOUNT, ""); err != nil { - return &mountError{ - op: "remount-ro", - target: target, - flags: oflags | unix.MS_REMOUNT, - err: err, - } - } - } - - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go deleted file mode 100644 index b9dc82d3ff..0000000000 --- a/vendor/github.com/containers/storage/pkg/mount/mounter_unsupported.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build !linux && !(freebsd && cgo) - -package mount - -func mount(device, target, mType string, flag uintptr, data string) error { - panic("Not implemented") -} diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo.go deleted file mode 100644 index bb2da474f4..0000000000 --- a/vendor/github.com/containers/storage/pkg/mount/mountinfo.go +++ /dev/null @@ -1,13 +0,0 @@ -package mount - -import ( - "github.com/moby/sys/mountinfo" -) - -type Info = mountinfo.Info - -var Mounted = mountinfo.Mounted - -func GetMounts() ([]*Info, error) { - return mountinfo.GetMounts(nil) -} diff --git a/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go b/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go deleted file mode 100644 index 2d9e75ea10..0000000000 --- a/vendor/github.com/containers/storage/pkg/mount/mountinfo_linux.go +++ /dev/null @@ -1,18 +0,0 @@ -package mount - -import ( - "fmt" - "os" - - "github.com/moby/sys/mountinfo" -) - -func PidMountInfo(pid int) ([]*Info, error) { - f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid)) - if err != nil { - return nil, err - } - defer f.Close() - - return mountinfo.GetMountsFromReader(f, nil) -} diff --git a/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go b/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go deleted file mode 100644 index 80922ad5ca..0000000000 --- a/vendor/github.com/containers/storage/pkg/mount/sharedsubtree_linux.go +++ /dev/null @@ -1,64 +0,0 @@ -package mount - -// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. -// See the supported options in flags.go for further reference. -func MakeShared(mountPoint string) error { - return ensureMountedAs(mountPoint, SHARED) -} - -// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. -// See the supported options in flags.go for further reference. -func MakeRShared(mountPoint string) error { - return ensureMountedAs(mountPoint, RSHARED) -} - -// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. -// See the supported options in flags.go for further reference. -func MakePrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, PRIVATE) -} - -// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option -// enabled. See the supported options in flags.go for further reference. -func MakeRPrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, RPRIVATE) -} - -// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. -// See the supported options in flags.go for further reference. 
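Editorial note on the Linux mounter above: its final branch exists because the kernel ignores `MS_RDONLY` on the initial bind mount, so read-only bind mounts take two calls — bind first, then remount the bind with the read-only bit. A minimal sketch of that two-step dance using the raw syscall wrapper (the `roBind` helper is hypothetical):

```go
//go:build linux

package example

import "golang.org/x/sys/unix"

// roBind reproduces the pattern from the hunk above: MS_RDONLY is only
// honored on a follow-up MS_REMOUNT|MS_BIND call, not on the initial bind.
func roBind(src, dst string) error {
	if err := unix.Mount(src, dst, "", unix.MS_BIND, ""); err != nil {
		return err
	}
	return unix.Mount("", dst, "", unix.MS_BIND|unix.MS_REMOUNT|unix.MS_RDONLY, "")
}
```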
-func MakeSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, SLAVE) -} - -// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. -// See the supported options in flags.go for further reference. -func MakeRSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, RSLAVE) -} - -// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option -// enabled. See the supported options in flags.go for further reference. -func MakeUnbindable(mountPoint string) error { - return ensureMountedAs(mountPoint, UNBINDABLE) -} - -// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount -// option enabled. See the supported options in flags.go for further reference. -func MakeRUnbindable(mountPoint string) error { - return ensureMountedAs(mountPoint, RUNBINDABLE) -} - -func ensureMountedAs(mnt string, flags int) error { - mounted, err := Mounted(mnt) - if err != nil { - return err - } - - if !mounted { - if err := mount(mnt, mnt, "none", uintptr(BIND), ""); err != nil { - return err - } - } - - return mount("", mnt, "none", uintptr(flags), "") -} diff --git a/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go b/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go deleted file mode 100644 index 331272e0ca..0000000000 --- a/vendor/github.com/containers/storage/pkg/mount/unmount_unix.go +++ /dev/null @@ -1,34 +0,0 @@ -//go:build !windows - -package mount - -import ( - "time" - - "golang.org/x/sys/unix" -) - -func unmount(target string, flags int) error { - var err error - for range 50 { - err = unix.Unmount(target, flags) - switch err { - case unix.EBUSY: - time.Sleep(50 * time.Millisecond) - continue - case unix.EINVAL, nil: - // Ignore "not mounted" error here. Note the same error - // can be returned if flags are invalid, so this code - // assumes that the flags value is always correct. - return nil - } - break - } - - return &mountError{ - op: "umount", - target: target, - flags: uintptr(flags), - err: err, - } -} diff --git a/vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go b/vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go deleted file mode 100644 index 3c942bfb20..0000000000 --- a/vendor/github.com/containers/storage/pkg/mount/unmount_unsupported.go +++ /dev/null @@ -1,7 +0,0 @@ -//go:build windows - -package mount - -func unmount(target string, flag int) error { - panic("Not implemented") -} diff --git a/vendor/github.com/containers/storage/pkg/reexec/README.md b/vendor/github.com/containers/storage/pkg/reexec/README.md deleted file mode 100644 index 6658f69b69..0000000000 --- a/vendor/github.com/containers/storage/pkg/reexec/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# reexec - -The `reexec` package facilitates the busybox style reexec of the docker binary that we require because -of the forking limitations of using Go. Handlers can be registered with a name and the argv 0 of -the exec of the binary will be used to find and execute custom init paths. diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go b/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go deleted file mode 100644 index 171cd81e75..0000000000 --- a/vendor/github.com/containers/storage/pkg/reexec/command_freebsd.go +++ /dev/null @@ -1,37 +0,0 @@ -//go:build freebsd - -package reexec - -import ( - "context" - "os" - "os/exec" - - "golang.org/x/sys/unix" -) - -// Self returns the path to the current process's binary. -// Uses sysctl. 
-func Self() string { - path, err := unix.SysctlArgs("kern.proc.pathname", -1) - if err == nil { - return path - } - return os.Args[0] -} - -// Command returns *exec.Cmd which has Path as current binary. -// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will -// be set to "/usr/bin/docker". -func Command(args ...string) *exec.Cmd { - cmd := exec.Command(Self()) - cmd.Args = args - return cmd -} - -// CommandContext returns *exec.Cmd which has Path as current binary. -func CommandContext(ctx context.Context, args ...string) *exec.Cmd { - cmd := exec.CommandContext(ctx, Self()) - cmd.Args = args - return cmd -} diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go b/vendor/github.com/containers/storage/pkg/reexec/command_linux.go deleted file mode 100644 index 025aef60a8..0000000000 --- a/vendor/github.com/containers/storage/pkg/reexec/command_linux.go +++ /dev/null @@ -1,34 +0,0 @@ -//go:build linux - -package reexec - -import ( - "context" - "os/exec" -) - -// Self returns the path to the current process's binary. -// Returns "/proc/self/exe". -func Self() string { - return "/proc/self/exe" -} - -// Command returns *exec.Cmd which has Path as current binary. -// This will use the in-memory version (/proc/self/exe) of the current binary, -// it is thus safe to delete or replace the on-disk binary (os.Args[0]). -func Command(args ...string) *exec.Cmd { - panicIfNotInitialized() - cmd := exec.Command(Self()) - cmd.Args = args - return cmd -} - -// CommandContext returns *exec.Cmd which has Path as current binary. -// This will use the in-memory version (/proc/self/exe) of the current binary, -// it is thus safe to delete or replace the on-disk binary (os.Args[0]). -func CommandContext(ctx context.Context, args ...string) *exec.Cmd { - panicIfNotInitialized() - cmd := exec.CommandContext(ctx, Self()) - cmd.Args = args - return cmd -} diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go b/vendor/github.com/containers/storage/pkg/reexec/command_unix.go deleted file mode 100644 index eefddea413..0000000000 --- a/vendor/github.com/containers/storage/pkg/reexec/command_unix.go +++ /dev/null @@ -1,32 +0,0 @@ -//go:build solaris || darwin - -package reexec - -import ( - "context" - "os/exec" -) - -// Self returns the path to the current process's binary. -// Uses os.Args[0]. -func Self() string { - return naiveSelf() -} - -// Command returns *exec.Cmd which has Path as current binary. -// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will -// be set to "/usr/bin/docker". -func Command(args ...string) *exec.Cmd { - panicIfNotInitialized() - cmd := exec.Command(Self()) - cmd.Args = args - return cmd -} - -// CommandContext returns *exec.Cmd which has Path as current binary. -func CommandContext(ctx context.Context, args ...string) *exec.Cmd { - panicIfNotInitialized() - cmd := exec.CommandContext(ctx, Self()) - cmd.Args = args - return cmd -} diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go b/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go deleted file mode 100644 index a78b548a5d..0000000000 --- a/vendor/github.com/containers/storage/pkg/reexec/command_unsupported.go +++ /dev/null @@ -1,20 +0,0 @@ -//go:build !linux && !windows && !freebsd && !solaris && !darwin - -package reexec - -import ( - "context" - "os/exec" -) - -// Command is unsupported on operating systems apart from Linux, Windows, Solaris and Darwin. 
-func Command(args ...string) *exec.Cmd { - panicIfNotInitialized() - return nil -} - -// CommandContext is unsupported on operating systems apart from Linux, Windows, Solaris and Darwin. -func CommandContext(ctx context.Context, args ...string) *exec.Cmd { - panicIfNotInitialized() - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go b/vendor/github.com/containers/storage/pkg/reexec/command_windows.go deleted file mode 100644 index ba2f0f8477..0000000000 --- a/vendor/github.com/containers/storage/pkg/reexec/command_windows.go +++ /dev/null @@ -1,34 +0,0 @@ -//go:build windows - -package reexec - -import ( - "context" - "os/exec" -) - -// Self returns the path to the current process's binary. -// Uses os.Args[0]. -func Self() string { - return naiveSelf() -} - -// Command returns *exec.Cmd which has Path as current binary. -// For example if current binary is "docker.exe" at "C:\", then cmd.Path will -// be set to "C:\docker.exe". -func Command(args ...string) *exec.Cmd { - panicIfNotInitialized() - cmd := exec.Command(Self()) - cmd.Args = args - return cmd -} - -// Command returns *exec.Cmd which has Path as current binary. -// For example if current binary is "docker.exe" at "C:\", then cmd.Path will -// be set to "C:\docker.exe". -func CommandContext(ctx context.Context, args ...string) *exec.Cmd { - panicIfNotInitialized() - cmd := exec.CommandContext(ctx, Self()) - cmd.Args = args - return cmd -} diff --git a/vendor/github.com/containers/storage/pkg/reexec/reexec.go b/vendor/github.com/containers/storage/pkg/reexec/reexec.go deleted file mode 100644 index a1938cd4f3..0000000000 --- a/vendor/github.com/containers/storage/pkg/reexec/reexec.go +++ /dev/null @@ -1,66 +0,0 @@ -package reexec - -import ( - "fmt" - "os" - "os/exec" - "path/filepath" -) - -var ( - registeredInitializers = make(map[string]func()) - initWasCalled = false -) - -// Register adds an initialization func under the specified name -func Register(name string, initializer func()) { - if _, exists := registeredInitializers[name]; exists { - panic(fmt.Sprintf("reexec func already registered under name %q", name)) - } - - registeredInitializers[name] = initializer -} - -// Init is called as the first part of the exec process and returns true if an -// initialization function was called. -func Init() bool { - initializer, exists := registeredInitializers[os.Args[0]] - initWasCalled = true - if exists { - initializer() - - return true - } - return false -} - -func panicIfNotInitialized() { - if !initWasCalled { - // The reexec package is used to run subroutines in - // subprocesses which would otherwise have unacceptable side - // effects on the main thread. If you found this error, then - // your program uses a package which needs to do this. 
In - // order for that to work, main() should start with this - // boilerplate, or an equivalent: - // if reexec.Init() { - // return - // } - panic("a library subroutine needed to run a subprocess, but reexec.Init() was not called in main()") - } -} - -func naiveSelf() string { - name := os.Args[0] - if filepath.Base(name) == name { - if lp, err := exec.LookPath(name); err == nil { - return lp - } - } - // handle conversion of relative paths to absolute - if absName, err := filepath.Abs(name); err == nil { - return absName - } - // if we couldn't get absolute name, return original - // (NOTE: Go only errors on Abs() if os.Getwd fails) - return name -} diff --git a/vendor/github.com/containers/storage/pkg/regexp/regexp.go b/vendor/github.com/containers/storage/pkg/regexp/regexp.go deleted file mode 100644 index 1a3333dba2..0000000000 --- a/vendor/github.com/containers/storage/pkg/regexp/regexp.go +++ /dev/null @@ -1,234 +0,0 @@ -package regexp - -import ( - "io" - "regexp" - "sync" -) - -// Regexp is a wrapper struct used for wrapping MustCompile regex expressions -// used as global variables. Using this structure helps speed the startup time -// of apps that want to use global regex variables. This library initializes them on -// first use as opposed to the start of the executable. -type Regexp struct { - *regexpStruct -} - -type regexpStruct struct { - _ noCopy - once sync.Once - regexp *regexp.Regexp - val string -} - -func Delayed(val string) Regexp { - re := &regexpStruct{ - val: val, - } - if precompile { - re.regexp = regexp.MustCompile(re.val) - } - return Regexp{re} -} - -func (re *regexpStruct) compile() { - if precompile { - return - } - re.once.Do(func() { - re.regexp = regexp.MustCompile(re.val) - }) -} - -func (re *regexpStruct) Expand(dst []byte, template []byte, src []byte, match []int) []byte { - re.compile() - return re.regexp.Expand(dst, template, src, match) -} - -func (re *regexpStruct) ExpandString(dst []byte, template string, src string, match []int) []byte { - re.compile() - return re.regexp.ExpandString(dst, template, src, match) -} - -func (re *regexpStruct) Find(b []byte) []byte { - re.compile() - return re.regexp.Find(b) -} - -func (re *regexpStruct) FindAll(b []byte, n int) [][]byte { - re.compile() - return re.regexp.FindAll(b, n) -} - -func (re *regexpStruct) FindAllIndex(b []byte, n int) [][]int { - re.compile() - return re.regexp.FindAllIndex(b, n) -} - -func (re *regexpStruct) FindAllString(s string, n int) []string { - re.compile() - return re.regexp.FindAllString(s, n) -} - -func (re *regexpStruct) FindAllStringIndex(s string, n int) [][]int { - re.compile() - return re.regexp.FindAllStringIndex(s, n) -} - -func (re *regexpStruct) FindAllStringSubmatch(s string, n int) [][]string { - re.compile() - return re.regexp.FindAllStringSubmatch(s, n) -} - -func (re *regexpStruct) FindAllStringSubmatchIndex(s string, n int) [][]int { - re.compile() - return re.regexp.FindAllStringSubmatchIndex(s, n) -} - -func (re *regexpStruct) FindAllSubmatch(b []byte, n int) [][][]byte { - re.compile() - return re.regexp.FindAllSubmatch(b, n) -} - -func (re *regexpStruct) FindAllSubmatchIndex(b []byte, n int) [][]int { - re.compile() - return re.regexp.FindAllSubmatchIndex(b, n) -} - -func (re *regexpStruct) FindIndex(b []byte) (loc []int) { - re.compile() - return re.regexp.FindIndex(b) -} - -func (re *regexpStruct) FindReaderIndex(r io.RuneReader) (loc []int) { - re.compile() - return re.regexp.FindReaderIndex(r) -} - -func (re *regexpStruct) FindReaderSubmatchIndex(r
io.RuneReader) []int { - re.compile() - return re.regexp.FindReaderSubmatchIndex(r) -} - -func (re *regexpStruct) FindString(s string) string { - re.compile() - return re.regexp.FindString(s) -} - -func (re *regexpStruct) FindStringIndex(s string) (loc []int) { - re.compile() - return re.regexp.FindStringIndex(s) -} - -func (re *regexpStruct) FindStringSubmatch(s string) []string { - re.compile() - return re.regexp.FindStringSubmatch(s) -} - -func (re *regexpStruct) FindStringSubmatchIndex(s string) []int { - re.compile() - return re.regexp.FindStringSubmatchIndex(s) -} - -func (re *regexpStruct) FindSubmatch(b []byte) [][]byte { - re.compile() - return re.regexp.FindSubmatch(b) -} - -func (re *regexpStruct) FindSubmatchIndex(b []byte) []int { - re.compile() - return re.regexp.FindSubmatchIndex(b) -} - -func (re *regexpStruct) LiteralPrefix() (prefix string, complete bool) { - re.compile() - return re.regexp.LiteralPrefix() -} - -func (re *regexpStruct) Longest() { - re.compile() - re.regexp.Longest() -} - -func (re *regexpStruct) Match(b []byte) bool { - re.compile() - return re.regexp.Match(b) -} - -func (re *regexpStruct) MatchReader(r io.RuneReader) bool { - re.compile() - return re.regexp.MatchReader(r) -} - -func (re *regexpStruct) MatchString(s string) bool { - re.compile() - return re.regexp.MatchString(s) -} - -func (re *regexpStruct) NumSubexp() int { - re.compile() - return re.regexp.NumSubexp() -} - -func (re *regexpStruct) ReplaceAll(src, repl []byte) []byte { - re.compile() - return re.regexp.ReplaceAll(src, repl) -} - -func (re *regexpStruct) ReplaceAllFunc(src []byte, repl func([]byte) []byte) []byte { - re.compile() - return re.regexp.ReplaceAllFunc(src, repl) -} - -func (re *regexpStruct) ReplaceAllLiteral(src, repl []byte) []byte { - re.compile() - return re.regexp.ReplaceAllLiteral(src, repl) -} - -func (re *regexpStruct) ReplaceAllLiteralString(src, repl string) string { - re.compile() - return re.regexp.ReplaceAllLiteralString(src, repl) -} - -func (re *regexpStruct) ReplaceAllString(src, repl string) string { - re.compile() - return re.regexp.ReplaceAllString(src, repl) -} - -func (re *regexpStruct) ReplaceAllStringFunc(src string, repl func(string) string) string { - re.compile() - return re.regexp.ReplaceAllStringFunc(src, repl) -} - -func (re *regexpStruct) Split(s string, n int) []string { - re.compile() - return re.regexp.Split(s, n) -} - -func (re *regexpStruct) String() string { - re.compile() - return re.regexp.String() -} - -func (re *regexpStruct) SubexpIndex(name string) int { - re.compile() - return re.regexp.SubexpIndex(name) -} - -func (re *regexpStruct) SubexpNames() []string { - re.compile() - return re.regexp.SubexpNames() -} - -// noCopy may be added to structs which must not be copied -// after the first use. -// -// See https://golang.org/issues/8005#issuecomment-190753527 -// for details. -// -// Note that it must not be embedded, due to the Lock and Unlock methods. -type noCopy struct{} - -// Lock is a no-op used by -copylocks checker from `go vet`. 
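The wrapper above trades a tiny per-call compile check for faster program start-up: package-level patterns are compiled on first use instead of at init time, unless the regexp_precompile build tag flips the precompile constant. A usage sketch against the vendored API:

```go
package main

import (
	"fmt"

	"github.com/containers/storage/pkg/regexp"
)

// Compiled lazily on first use; built with -tags regexp_precompile it would
// instead be compiled here, at package initialization, like regexp.MustCompile.
var hexID = regexp.Delayed(`^[0-9a-f]{64}$`)

func main() {
	fmt.Println(hexID.MatchString("deadbeef")) // false: too short
}
```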
-func (*noCopy) Lock() {} -func (*noCopy) Unlock() {} diff --git a/vendor/github.com/containers/storage/pkg/regexp/regexp_dontprecompile.go b/vendor/github.com/containers/storage/pkg/regexp/regexp_dontprecompile.go deleted file mode 100644 index ccd9d0fb1f..0000000000 --- a/vendor/github.com/containers/storage/pkg/regexp/regexp_dontprecompile.go +++ /dev/null @@ -1,5 +0,0 @@ -//go:build !regexp_precompile - -package regexp - -const precompile = false diff --git a/vendor/github.com/containers/storage/pkg/regexp/regexp_precompile.go b/vendor/github.com/containers/storage/pkg/regexp/regexp_precompile.go deleted file mode 100644 index fe4421b019..0000000000 --- a/vendor/github.com/containers/storage/pkg/regexp/regexp_precompile.go +++ /dev/null @@ -1,5 +0,0 @@ -//go:build regexp_precompile - -package regexp - -const precompile = true diff --git a/vendor/github.com/containers/storage/pkg/system/chmod.go b/vendor/github.com/containers/storage/pkg/system/chmod.go deleted file mode 100644 index a01d8abfbd..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/chmod.go +++ /dev/null @@ -1,17 +0,0 @@ -package system - -import ( - "errors" - "os" - "syscall" -) - -func Chmod(name string, mode os.FileMode) error { - err := os.Chmod(name, mode) - - for err != nil && errors.Is(err, syscall.EINTR) { - err = os.Chmod(name, mode) - } - - return err -} diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes.go b/vendor/github.com/containers/storage/pkg/system/chtimes.go deleted file mode 100644 index 056d19954d..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/chtimes.go +++ /dev/null @@ -1,35 +0,0 @@ -package system - -import ( - "os" - "time" -) - -// Chtimes changes the access time and modified time of a file at the given path -func Chtimes(name string, atime time.Time, mtime time.Time) error { - unixMinTime := time.Unix(0, 0) - unixMaxTime := maxTime - - // If the modified time is prior to the Unix Epoch, or after the - // end of Unix Time, os.Chtimes has undefined behavior - // default to Unix Epoch in this case, just in case - - if atime.Before(unixMinTime) || atime.After(unixMaxTime) { - atime = unixMinTime - } - - if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) { - mtime = unixMinTime - } - - if err := os.Chtimes(name, atime, mtime); err != nil { - return err - } - - // Take platform specific action for setting create time. - if err := setCTime(name, mtime); err != nil { - return err - } - - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go b/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go deleted file mode 100644 index 892d56138d..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/chtimes_unix.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build !windows - -package system - -import ( - "time" -) - -// setCTime will set the create time on a file. On Unix, the create -// time is updated as a side effect of setting the modified time, so -// no action is required. -func setCTime(path string, ctime time.Time) error { - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go b/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go deleted file mode 100644 index f0d744eb83..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/chtimes_windows.go +++ /dev/null @@ -1,28 +0,0 @@ -//go:build windows - -package system - -import ( - "time" - - "golang.org/x/sys/windows" -) - -// setCTime will set the create time on a file. 
On Windows, this requires -// calling SetFileTime and explicitly including the create time. -func setCTime(path string, ctime time.Time) error { - ctimespec := windows.NsecToTimespec(ctime.UnixNano()) - pathp, e := windows.UTF16PtrFromString(path) - if e != nil { - return e - } - h, e := windows.CreateFile(pathp, - windows.FILE_WRITE_ATTRIBUTES, windows.FILE_SHARE_WRITE, nil, - windows.OPEN_EXISTING, windows.FILE_FLAG_BACKUP_SEMANTICS, 0) - if e != nil { - return e - } - defer windows.Close(h) - c := windows.NsecToFiletime(windows.TimespecToNsec(ctimespec)) - return windows.SetFileTime(h, &c, nil, nil) -} diff --git a/vendor/github.com/containers/storage/pkg/system/errors.go b/vendor/github.com/containers/storage/pkg/system/errors.go deleted file mode 100644 index b87d419b57..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/errors.go +++ /dev/null @@ -1,8 +0,0 @@ -package system - -import ( - "errors" -) - -// ErrNotSupportedPlatform means the platform is not supported. -var ErrNotSupportedPlatform = errors.New("platform and architecture is not supported") diff --git a/vendor/github.com/containers/storage/pkg/system/exitcode.go b/vendor/github.com/containers/storage/pkg/system/exitcode.go deleted file mode 100644 index 60f0514b1d..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/exitcode.go +++ /dev/null @@ -1,33 +0,0 @@ -package system - -import ( - "fmt" - "os/exec" - "syscall" -) - -// GetExitCode returns the ExitStatus of the specified error if its type is -// exec.ExitError, returns 0 and an error otherwise. -func GetExitCode(err error) (int, error) { - exitCode := 0 - if exiterr, ok := err.(*exec.ExitError); ok { - if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { - return procExit.ExitStatus(), nil - } - } - return exitCode, fmt.Errorf("failed to get exit code") -} - -// ProcessExitCode process the specified error and returns the exit status code -// if the error was of type exec.ExitError, returns nothing otherwise. -func ProcessExitCode(err error) (exitCode int) { - if err != nil { - var exiterr error - if exitCode, exiterr = GetExitCode(err); exiterr != nil { - // TODO: Fix this so we check the error's text. - // we've failed to retrieve exit code, so we set it to 127 - exitCode = 127 - } - } - return -} diff --git a/vendor/github.com/containers/storage/pkg/system/extattr_freebsd.go b/vendor/github.com/containers/storage/pkg/system/extattr_freebsd.go deleted file mode 100644 index 1314058f17..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/extattr_freebsd.go +++ /dev/null @@ -1,93 +0,0 @@ -//go:build freebsd - -package system - -import ( - "os" - "unsafe" - - "golang.org/x/sys/unix" -) - -const ( - EXTATTR_NAMESPACE_EMPTY = unix.EXTATTR_NAMESPACE_EMPTY - EXTATTR_NAMESPACE_USER = unix.EXTATTR_NAMESPACE_USER - EXTATTR_NAMESPACE_SYSTEM = unix.EXTATTR_NAMESPACE_SYSTEM -) - -// ExtattrGetLink retrieves the value of the extended attribute identified by attrname -// in the given namespace and associated with the given path in the file system. -// If the path is a symbolic link, the extended attribute is retrieved from the link itself. -// Returns a []byte slice if the extattr is set and nil otherwise. 
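GetExitCode and ProcessExitCode above predate exec.ExitError.ExitCode. With a modern standard library the same information falls out of errors.As without touching syscall.WaitStatus; a sketch, assuming a Unix `false` binary on PATH:

```go
package main

import (
	"errors"
	"fmt"
	"os/exec"
)

func main() {
	err := exec.Command("false").Run() // exits with status 1

	var ee *exec.ExitError
	if errors.As(err, &ee) {
		fmt.Println("exit code:", ee.ExitCode()) // 1
	}
}
```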
-func ExtattrGetLink(path string, attrnamespace int, attrname string) ([]byte, error) { - size, errno := unix.ExtattrGetLink(path, attrnamespace, attrname, - uintptr(unsafe.Pointer(nil)), 0) - if errno != nil { - if errno == unix.ENOATTR { - return nil, nil - } - return nil, &os.PathError{Op: "extattr_get_link", Path: path, Err: errno} - } - if size == 0 { - return []byte{}, nil - } - - dest := make([]byte, size) - size, errno = unix.ExtattrGetLink(path, attrnamespace, attrname, - uintptr(unsafe.Pointer(&dest[0])), size) - if errno != nil { - return nil, &os.PathError{Op: "extattr_get_link", Path: path, Err: errno} - } - - return dest[:size], nil -} - -// ExtattrSetLink sets the value of extended attribute identified by attrname -// in the given namespace and associated with the given path in the file system. -// If the path is a symbolic link, the extended attribute is set on the link itself. -func ExtattrSetLink(path string, attrnamespace int, attrname string, data []byte) error { - if len(data) == 0 { - data = []byte{} // ensure non-nil for empty data - } - if _, errno := unix.ExtattrSetLink(path, attrnamespace, attrname, - uintptr(unsafe.Pointer(&data[0])), len(data)); errno != nil { - return &os.PathError{Op: "extattr_set_link", Path: path, Err: errno} - } - - return nil -} - -// ExtattrListLink lists extended attributes associated with the given path -// in the specified namespace. If the path is a symbolic link, the attributes -// are listed from the link itself. -func ExtattrListLink(path string, attrnamespace int) ([]string, error) { - size, errno := unix.ExtattrListLink(path, attrnamespace, - uintptr(unsafe.Pointer(nil)), 0) - if errno != nil { - return nil, &os.PathError{Op: "extattr_list_link", Path: path, Err: errno} - } - if size == 0 { - return []string{}, nil - } - - dest := make([]byte, size) - size, errno = unix.ExtattrListLink(path, attrnamespace, - uintptr(unsafe.Pointer(&dest[0])), size) - if errno != nil { - return nil, &os.PathError{Op: "extattr_list_link", Path: path, Err: errno} - } - - var attrs []string - for i := 0; i < size; { - // Each attribute is preceded by a single byte length - length := int(dest[i]) - i++ - if i+length > size { - break - } - attrs = append(attrs, string(dest[i:i+length])) - i += length - } - - return attrs, nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/extattr_unsupported.go b/vendor/github.com/containers/storage/pkg/system/extattr_unsupported.go deleted file mode 100644 index 07b67357f3..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/extattr_unsupported.go +++ /dev/null @@ -1,24 +0,0 @@ -//go:build !freebsd - -package system - -const ( - EXTATTR_NAMESPACE_EMPTY = 0 - EXTATTR_NAMESPACE_USER = 0 - EXTATTR_NAMESPACE_SYSTEM = 0 -) - -// ExtattrGetLink is not supported on platforms other than FreeBSD. -func ExtattrGetLink(path string, attrnamespace int, attrname string) ([]byte, error) { - return nil, ErrNotSupportedPlatform -} - -// ExtattrSetLink is not supported on platforms other than FreeBSD. -func ExtattrSetLink(path string, attrnamespace int, attrname string, data []byte) error { - return ErrNotSupportedPlatform -} - -// ExtattrListLink is not supported on platforms other than FreeBSD. 
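ExtattrListLink above decodes FreeBSD's extattr_list_link(2) output by hand: each attribute name is preceded by a single length byte. The decoding loop in isolation, exercised with a synthetic buffer:

```go
package main

import "fmt"

// parseAttrList mirrors the loop in the deleted ExtattrListLink: a one-byte
// length prefix, then that many bytes of attribute name, repeated.
func parseAttrList(buf []byte) []string {
	var attrs []string
	for i := 0; i < len(buf); {
		n := int(buf[i])
		i++
		if i+n > len(buf) {
			break // truncated entry; stop rather than over-read
		}
		attrs = append(attrs, string(buf[i:i+n]))
		i += n
	}
	return attrs
}

func main() {
	// "\x04user\x05group" encodes the two names "user" and "group".
	fmt.Println(parseAttrList([]byte("\x04user\x05group")))
}
```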
-func ExtattrListLink(path string, attrnamespace int) ([]string, error) { - return nil, ErrNotSupportedPlatform -} diff --git a/vendor/github.com/containers/storage/pkg/system/init.go b/vendor/github.com/containers/storage/pkg/system/init.go deleted file mode 100644 index 05642f6038..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/init.go +++ /dev/null @@ -1,22 +0,0 @@ -package system - -import ( - "syscall" - "time" - "unsafe" -) - -// maxTime is used by chtimes. -var maxTime time.Time - -func init() { - // chtimes initialization - if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { - // This is a 64 bit timespec - // os.Chtimes limits time to the following - maxTime = time.Unix(0, 1<<63-1) - } else { - // This is a 32 bit timespec - maxTime = time.Unix(1<<31-1, 0) - } -} diff --git a/vendor/github.com/containers/storage/pkg/system/init_windows.go b/vendor/github.com/containers/storage/pkg/system/init_windows.go deleted file mode 100644 index 5f6fea1d37..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/init_windows.go +++ /dev/null @@ -1,16 +0,0 @@ -package system - -import "os" - -// LCOWSupported determines if Linux Containers on Windows are supported. -// Note: This feature is in development (06/17) and enabled through an -// environment variable. At a future time, it will be enabled based -// on build number. @jhowardmsft -var lcowSupported = false - -func init() { - // LCOW initialization - if os.Getenv("LCOW_SUPPORTED") != "" { - lcowSupported = true - } -} diff --git a/vendor/github.com/containers/storage/pkg/system/lchflags_bsd.go b/vendor/github.com/containers/storage/pkg/system/lchflags_bsd.go deleted file mode 100644 index f9de938dd2..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/lchflags_bsd.go +++ /dev/null @@ -1,55 +0,0 @@ -//go:build freebsd - -package system - -import ( - "unsafe" - - "golang.org/x/sys/unix" -) - -// Flag values from <sys/stat.h> -const ( - /* - * Definitions of flags stored in file flags word. - * - * Super-user and owner changeable flags. - */ - UF_SETTABLE uint32 = 0x0000ffff /* mask of owner changeable flags */ - UF_NODUMP uint32 = 0x00000001 /* do not dump file */ - UF_IMMUTABLE uint32 = 0x00000002 /* file may not be changed */ - UF_APPEND uint32 = 0x00000004 /* writes to file may only append */ - UF_OPAQUE uint32 = 0x00000008 /* directory is opaque wrt. union */ - UF_NOUNLINK uint32 = 0x00000010 /* file may not be removed or renamed */ - - UF_SYSTEM uint32 = 0x00000080 /* Windows system file bit */ - UF_SPARSE uint32 = 0x00000100 /* sparse file */ - UF_OFFLINE uint32 = 0x00000200 /* file is offline */ - UF_REPARSE uint32 = 0x00000400 /* Windows reparse point file bit */ - UF_ARCHIVE uint32 = 0x00000800 /* file needs to be archived */ - UF_READONLY uint32 = 0x00001000 /* Windows readonly file bit */ - /* This is the same as the MacOS X definition of UF_HIDDEN. */ - UF_HIDDEN uint32 = 0x00008000 /* file is hidden */ - - /* - * Super-user changeable flags.
- */ - SF_SETTABLE uint32 = 0xffff0000 /* mask of superuser changeable flags */ - SF_ARCHIVED uint32 = 0x00010000 /* file is archived */ - SF_IMMUTABLE uint32 = 0x00020000 /* file may not be changed */ - SF_APPEND uint32 = 0x00040000 /* writes to file may only append */ - SF_NOUNLINK uint32 = 0x00100000 /* file may not be removed or renamed */ - SF_SNAPSHOT uint32 = 0x00200000 /* snapshot inode */ -) - -func Lchflags(path string, flags uint32) error { - p, err := unix.BytePtrFromString(path) - if err != nil { - return err - } - _, _, e1 := unix.Syscall(unix.SYS_LCHFLAGS, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) - if e1 != 0 { - return e1 - } - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/lchown.go b/vendor/github.com/containers/storage/pkg/system/lchown.go deleted file mode 100644 index eb2d8b464c..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/lchown.go +++ /dev/null @@ -1,20 +0,0 @@ -package system - -import ( - "os" - "syscall" -) - -func Lchown(name string, uid, gid int) error { - err := syscall.Lchown(name, uid, gid) - - for err == syscall.EINTR { - err = syscall.Lchown(name, uid, gid) - } - - if err != nil { - return &os.PathError{Op: "lchown", Path: name, Err: err} - } - - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/lcow_unix.go b/vendor/github.com/containers/storage/pkg/system/lcow_unix.go deleted file mode 100644 index 037ccf59d2..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/lcow_unix.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build !windows - -package system - -// LCOWSupported returns true if Linux containers on Windows are supported. -func LCOWSupported() bool { - return false -} diff --git a/vendor/github.com/containers/storage/pkg/system/lcow_windows.go b/vendor/github.com/containers/storage/pkg/system/lcow_windows.go deleted file mode 100644 index e54d01e696..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/lcow_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -package system - -// LCOWSupported returns true if Linux containers on Windows are supported. -func LCOWSupported() bool { - return lcowSupported -} diff --git a/vendor/github.com/containers/storage/pkg/system/lstat_unix.go b/vendor/github.com/containers/storage/pkg/system/lstat_unix.go deleted file mode 100644 index 826c1f9c36..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/lstat_unix.go +++ /dev/null @@ -1,20 +0,0 @@ -//go:build !windows - -package system - -import ( - "os" - "syscall" -) - -// Lstat takes a path to a file and returns -// a system.StatT type pertaining to that file. -// -// Throws an error if the file does not exist -func Lstat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Lstat(path, s); err != nil { - return nil, &os.PathError{Op: "Lstat", Path: path, Err: err} - } - return fromStatT(s) -} diff --git a/vendor/github.com/containers/storage/pkg/system/lstat_windows.go b/vendor/github.com/containers/storage/pkg/system/lstat_windows.go deleted file mode 100644 index e51df0dafe..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/lstat_windows.go +++ /dev/null @@ -1,14 +0,0 @@ -package system - -import "os" - -// Lstat calls os.Lstat to get a fileinfo interface back. -// This is then copied into our own locally defined structure. 
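Lchown above retries the raw syscall for as long as it reports EINTR, the same idiom Chmod uses earlier in this package. The retry factored into a helper; a sketch, with `/tmp/example` as a placeholder path:

```go
//go:build unix

package main

import (
	"fmt"
	"os"
	"syscall"
)

// ignoringEINTR retries fn until it returns anything other than EINTR,
// matching the loop in the deleted Lchown wrapper.
func ignoringEINTR(fn func() error) error {
	for {
		err := fn()
		if err != syscall.EINTR {
			return err
		}
	}
}

func main() {
	err := ignoringEINTR(func() error {
		return syscall.Lchown("/tmp/example", os.Getuid(), os.Getgid())
	})
	fmt.Println(err)
}
```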
-func Lstat(path string) (*StatT, error) { - fi, err := os.Lstat(path) - if err != nil { - return nil, err - } - - return fromStatT(&fi) -} diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo.go b/vendor/github.com/containers/storage/pkg/system/meminfo.go deleted file mode 100644 index 3b6e947e67..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/meminfo.go +++ /dev/null @@ -1,17 +0,0 @@ -package system - -// MemInfo contains memory statistics of the host system. -type MemInfo struct { - // Total usable RAM (i.e. physical RAM minus a few reserved bits and the - // kernel binary code). - MemTotal int64 - - // Amount of free memory. - MemFree int64 - - // Total amount of swap space available. - SwapTotal int64 - - // Amount of swap space that is currently unused. - SwapFree int64 -} diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go b/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go deleted file mode 100644 index 589cbeba79..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/meminfo_freebsd.go +++ /dev/null @@ -1,85 +0,0 @@ -//go:build freebsd && cgo - -package system - -import ( - "errors" - "fmt" - "unsafe" - - "golang.org/x/sys/unix" -) - -// #include <unistd.h> -// #include <sys/types.h> -// #include <sys/sysctl.h> -// #include <sys/vmmeter.h> -import "C" - -func getMemInfo() (int64, int64, error) { - data, err := unix.SysctlRaw("vm.vmtotal") - if err != nil { - return -1, -1, fmt.Errorf("can't get kernel info: %w", err) - } - if len(data) != C.sizeof_struct_vmtotal { - return -1, -1, fmt.Errorf("unexpected vmtotal size %d", len(data)) - } - - total := (*C.struct_vmtotal)(unsafe.Pointer(&data[0])) - - pagesize := int64(C.sysconf(C._SC_PAGESIZE)) - npages := int64(C.sysconf(C._SC_PHYS_PAGES)) - return pagesize * npages, pagesize * int64(total.t_free), nil -} - -func getSwapInfo() (int64, int64, error) { - var ( - total int64 = 0 - used int64 = 0 - ) - swapCount, err := unix.SysctlUint32("vm.nswapdev") - if err != nil { - return -1, -1, fmt.Errorf("reading vm.nswapdev: %w", err) - } - for i := 0; i < int(swapCount); i++ { - data, err := unix.SysctlRaw("vm.swap_info", i) - if err != nil { - return -1, -1, fmt.Errorf("reading vm.swap_info.%d: %w", i, err) - } - if len(data) != C.sizeof_struct_xswdev { - return -1, -1, fmt.Errorf("unexpected swap_info size %d", len(data)) - } - xsw := (*C.struct_xswdev)(unsafe.Pointer(&data[0])) - total += int64(xsw.xsw_nblks) - used += int64(xsw.xsw_used) - } - pagesize := int64(C.sysconf(C._SC_PAGESIZE)) - return pagesize * total, pagesize * (total - used), nil -} - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// -// MemInfo type.
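The FreeBSD implementation above fills MemInfo from sysctl, and the Linux one below parses /proc/meminfo. On Linux the same four fields are also available through sysinfo(2) without touching procfs; a sketch using golang.org/x/sys/unix (field names as on linux/amd64):

```go
//go:build linux

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	var si unix.Sysinfo_t
	if err := unix.Sysinfo(&si); err != nil {
		panic(err)
	}
	unit := int64(si.Unit) // size in bytes of the memory unit used below
	fmt.Printf("MemTotal=%d MemFree=%d SwapTotal=%d SwapFree=%d\n",
		int64(si.Totalram)*unit, int64(si.Freeram)*unit,
		int64(si.Totalswap)*unit, int64(si.Freeswap)*unit)
}
```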
-func ReadMemInfo() (*MemInfo, error) { - MemTotal, MemFree, err := getMemInfo() - if err != nil { - return nil, fmt.Errorf("getting memory totals %w", err) - } - SwapTotal, SwapFree, err := getSwapInfo() - if err != nil { - return nil, fmt.Errorf("getting swap totals %w", err) - } - - if MemTotal < 0 || MemFree < 0 || SwapTotal < 0 || SwapFree < 0 { - return nil, errors.New("getting system memory info") - } - - meminfo := &MemInfo{} - // Total memory is total physical memory less than memory locked by kernel - meminfo.MemTotal = MemTotal - meminfo.MemFree = MemFree - meminfo.SwapTotal = SwapTotal - meminfo.SwapFree = SwapFree - - return meminfo, nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_linux.go b/vendor/github.com/containers/storage/pkg/system/meminfo_linux.go deleted file mode 100644 index 385f1d5e73..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/meminfo_linux.go +++ /dev/null @@ -1,65 +0,0 @@ -package system - -import ( - "bufio" - "io" - "os" - "strconv" - "strings" - - "github.com/docker/go-units" -) - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// MemInfo type. -func ReadMemInfo() (*MemInfo, error) { - file, err := os.Open("/proc/meminfo") - if err != nil { - return nil, err - } - defer file.Close() - return parseMemInfo(file) -} - -// parseMemInfo parses the /proc/meminfo file into -// a MemInfo object given an io.Reader to the file. -// Throws error if there are problems reading from the file -func parseMemInfo(reader io.Reader) (*MemInfo, error) { - meminfo := &MemInfo{} - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - // Expected format: ["MemTotal:", "1234", "kB"] - parts := strings.Fields(scanner.Text()) - - // Sanity checks: Skip malformed entries. - if len(parts) < 3 || parts[2] != "kB" { - continue - } - - // Convert to bytes. - size, err := strconv.Atoi(parts[1]) - if err != nil { - continue - } - bytes := int64(size) * units.KiB - - switch parts[0] { - case "MemTotal:": - meminfo.MemTotal = bytes - case "MemFree:": - meminfo.MemFree = bytes - case "SwapTotal:": - meminfo.SwapTotal = bytes - case "SwapFree:": - meminfo.SwapFree = bytes - } - - } - - // Handle errors that may have occurred during the reading of the file. 
- if err := scanner.Err(); err != nil { - return nil, err - } - - return meminfo, nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go b/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go deleted file mode 100644 index 17474e114a..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/meminfo_solaris.go +++ /dev/null @@ -1,129 +0,0 @@ -//go:build solaris && cgo - -package system - -import ( - "fmt" - "unsafe" -) - -// #cgo CFLAGS: -std=c99 -// #cgo LDFLAGS: -lkstat -// #include <unistd.h> -// #include <stdlib.h> -// #include <stdio.h> -// #include <kstat.h> -// #include <sys/swap.h> -// #include <sys/param.h> -// struct swaptable *allocSwaptable(int num) { -// struct swaptable *st; -// struct swapent *swapent; -// st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int)); -// swapent = st->swt_ent; -// for (int i = 0; i < num; i++,swapent++) { -// swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char)); -// } -// st->swt_n = num; -// return st; -//} -// void freeSwaptable (struct swaptable *st) { -// struct swapent *swapent = st->swt_ent; -// for (int i = 0; i < st->swt_n; i++,swapent++) { -// free(swapent->ste_path); -// } -// free(st); -// } -// swapent_t getSwapEnt(swapent_t *ent, int i) { -// return ent[i]; -// } -// int64_t getPpKernel() { -// int64_t pp_kernel = 0; -// kstat_ctl_t *ksc; -// kstat_t *ks; -// kstat_named_t *knp; -// kid_t kid; -// -// if ((ksc = kstat_open()) == NULL) { -// return -1; -// } -// if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) { -// return -1; -// } -// if (((kid = kstat_read(ksc, ks, NULL)) == -1) || -// ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) { -// return -1; -// } -// switch (knp->data_type) { -// case KSTAT_DATA_UINT64: -// pp_kernel = knp->value.ui64; -// break; -// case KSTAT_DATA_UINT32: -// pp_kernel = knp->value.ui32; -// break; -// } -// pp_kernel *= sysconf(_SC_PAGESIZE); -// return (pp_kernel > 0 ? pp_kernel : -1); -// } -import "C" - -// Get the system memory info using sysconf same as prtconf -func getTotalMem() int64 { - pagesize := C.sysconf(C._SC_PAGESIZE) - npages := C.sysconf(C._SC_PHYS_PAGES) - return int64(pagesize * npages) -} - -func getFreeMem() int64 { - pagesize := C.sysconf(C._SC_PAGESIZE) - npages := C.sysconf(C._SC_AVPHYS_PAGES) - return int64(pagesize * npages) -} - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// -// MemInfo type.
-func ReadMemInfo() (*MemInfo, error) { - ppKernel := C.getPpKernel() - MemTotal := getTotalMem() - MemFree := getFreeMem() - SwapTotal, SwapFree, err := getSysSwap() - - if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 || - SwapFree < 0 { - return nil, fmt.Errorf("getting system memory info %w", err) - } - - meminfo := &MemInfo{} - // Total memory is total physical memory less than memory locked by kernel - meminfo.MemTotal = MemTotal - int64(ppKernel) - meminfo.MemFree = MemFree - meminfo.SwapTotal = SwapTotal - meminfo.SwapFree = SwapFree - - return meminfo, nil -} - -func getSysSwap() (int64, int64, error) { - var tSwap int64 - var fSwap int64 - var diskblksPerPage int64 - num, err := C.swapctl(C.SC_GETNSWP, nil) - if err != nil { - return -1, -1, err - } - st := C.allocSwaptable(num) - _, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st)) - if err != nil { - C.freeSwaptable(st) - return -1, -1, err - } - - diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT) - for i := 0; i < int(num); i++ { - swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i)) - tSwap += int64(swapent.ste_pages) * diskblksPerPage - fSwap += int64(swapent.ste_free) * diskblksPerPage - } - C.freeSwaptable(st) - return tSwap, fSwap, nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go b/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go deleted file mode 100644 index db08642752..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/meminfo_unsupported.go +++ /dev/null @@ -1,8 +0,0 @@ -//go:build !linux && !windows && !solaris && !(freebsd && cgo) - -package system - -// ReadMemInfo is not supported on platforms other than linux and windows. -func ReadMemInfo() (*MemInfo, error) { - return nil, ErrNotSupportedPlatform -} diff --git a/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go b/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go deleted file mode 100644 index c833f30f76..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/meminfo_windows.go +++ /dev/null @@ -1,46 +0,0 @@ -package system - -import ( - "unsafe" - - "golang.org/x/sys/windows" -) - -var ( - modkernel32 = windows.NewLazySystemDLL("kernel32.dll") - - procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") -) - -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx -type memorystatusex struct { - dwLength uint32 - dwMemoryLoad uint32 - ullTotalPhys uint64 - ullAvailPhys uint64 - ullTotalPageFile uint64 - ullAvailPageFile uint64 - ullTotalVirtual uint64 - ullAvailVirtual uint64 - ullAvailExtendedVirtual uint64 -} - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// -// MemInfo type. 
-func ReadMemInfo() (*MemInfo, error) { - msi := &memorystatusex{ - dwLength: 64, - } - r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) - if r1 == 0 { - return &MemInfo{}, nil - } - return &MemInfo{ - MemTotal: int64(msi.ullTotalPhys), - MemFree: int64(msi.ullAvailPhys), - SwapTotal: int64(msi.ullTotalPageFile), - SwapFree: int64(msi.ullAvailPageFile), - }, nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/mknod.go b/vendor/github.com/containers/storage/pkg/system/mknod.go deleted file mode 100644 index ff679c5b19..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/mknod.go +++ /dev/null @@ -1,22 +0,0 @@ -//go:build !windows && !freebsd - -package system - -import ( - "golang.org/x/sys/unix" -) - -// Mknod creates a filesystem node (file, device special file or named pipe) named path -// with attributes specified by mode and dev. -func Mknod(path string, mode uint32, dev uint32) error { - return unix.Mknod(path, mode, int(dev)) -} - -// Mkdev is used to build the value of linux devices (in /dev/) which specifies major -// and minor number of the newly created device special file. -// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. -// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, -// then the top 12 bits of the minor. -func Mkdev(major int64, minor int64) uint32 { - return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) -} diff --git a/vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go b/vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go deleted file mode 100644 index d94353600a..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/mknod_freebsd.go +++ /dev/null @@ -1,22 +0,0 @@ -//go:build freebsd - -package system - -import ( - "golang.org/x/sys/unix" -) - -// Mknod creates a filesystem node (file, device special file or named pipe) named path -// with attributes specified by mode and dev. -func Mknod(path string, mode uint32, dev uint64) error { - return unix.Mknod(path, mode, dev) -} - -// Mkdev is used to build the value of linux devices (in /dev/) which specifies major -// and minor number of the newly created device special file. -// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. -// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, -// then the top 12 bits of the minor. -func Mkdev(major int64, minor int64) uint64 { - return uint64(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) -} diff --git a/vendor/github.com/containers/storage/pkg/system/mknod_windows.go b/vendor/github.com/containers/storage/pkg/system/mknod_windows.go deleted file mode 100644 index 752f90b14f..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/mknod_windows.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build windows - -package system - -// Mknod is not implemented on Windows. -func Mknod(path string, mode uint32, dev int) error { - return ErrNotSupportedPlatform -} - -// Mkdev is not implemented on Windows. 
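Mkdev above packs the minor's low 8 bits, then 12 bits of the major, then the remaining minor bits: the split layout Linux kept for compatibility with 16-bit dev_t values. A worked example of the encoding:

```go
package main

import "fmt"

// mkdev reproduces the formula from the deleted mknod.go.
func mkdev(major, minor int64) uint32 {
	return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
}

func main() {
	// /dev/sda1 is conventionally major 8, minor 1:
	// minor low byte 0x01, major 0x008 in bits 8..19.
	fmt.Printf("%#x\n", mkdev(8, 1)) // 0x801
}
```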
-func Mkdev(major int64, minor int64) uint32 { - panic("Mkdev not implemented on Windows.") -} diff --git a/vendor/github.com/containers/storage/pkg/system/path.go b/vendor/github.com/containers/storage/pkg/system/path.go deleted file mode 100644 index ca076f2bc2..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/path.go +++ /dev/null @@ -1,20 +0,0 @@ -package system - -import "runtime" - -const defaultUnixPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - -// DefaultPathEnv is unix style list of directories to search for -// executables. Each directory is separated from the next by a colon -// ':' character . -func DefaultPathEnv(platform string) string { - if runtime.GOOS == "windows" { - if platform != runtime.GOOS && LCOWSupported() { - return defaultUnixPathEnv - } - // Deliberately empty on Windows containers on Windows as the default path will be set by - // the container. Docker has no context of what the default path should be. - return "" - } - return defaultUnixPathEnv -} diff --git a/vendor/github.com/containers/storage/pkg/system/path_unix.go b/vendor/github.com/containers/storage/pkg/system/path_unix.go deleted file mode 100644 index fc8de3e4dc..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/path_unix.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build !windows - -package system - -// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, -// is the system drive. This is a no-op on Linux. -func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { - return path, nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/path_windows.go b/vendor/github.com/containers/storage/pkg/system/path_windows.go deleted file mode 100644 index 8838d9fd28..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/path_windows.go +++ /dev/null @@ -1,33 +0,0 @@ -//go:build windows - -package system - -import ( - "fmt" - "path/filepath" - "strings" -) - -// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path. -// This is used, for example, when validating a user provided path in docker cp. -// If a drive letter is supplied, it must be the system drive. The drive letter -// is always removed. Also, it translates it to OS semantics (IOW / to \). We -// need the path in this syntax so that it can ultimately be concatenated with -// a Windows long-path which doesn't support drive-letters. Examples: -// C: --> Fail -// C:\ --> \ -// a --> a -// /a --> \a -// d:\ --> Fail -func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) { - if len(path) == 2 && string(path[1]) == ":" { - return "", fmt.Errorf("relative path not specified in %q", path) - } - if !filepath.IsAbs(path) || len(path) < 2 { - return filepath.FromSlash(path), nil - } - if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") { - return "", fmt.Errorf("specified path is not on the system drive (C:)") - } - return filepath.FromSlash(path[2:]), nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/process_unix.go b/vendor/github.com/containers/storage/pkg/system/process_unix.go deleted file mode 100644 index 5090f30424..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/process_unix.go +++ /dev/null @@ -1,24 +0,0 @@ -//go:build linux || freebsd || solaris || darwin - -package system - -import ( - "syscall" - - "golang.org/x/sys/unix" -) - -// IsProcessAlive returns true if process with a given pid is running. 
-func IsProcessAlive(pid int) bool { - err := unix.Kill(pid, syscall.Signal(0)) - if err == nil || err == unix.EPERM { - return true - } - - return false -} - -// KillProcess force-stops a process. -func KillProcess(pid int) { - _ = unix.Kill(pid, unix.SIGKILL) -} diff --git a/vendor/github.com/containers/storage/pkg/system/rm.go b/vendor/github.com/containers/storage/pkg/system/rm.go deleted file mode 100644 index 12243707ac..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/rm.go +++ /dev/null @@ -1,99 +0,0 @@ -package system - -import ( - "errors" - "fmt" - "os" - "syscall" - "time" - - "github.com/containers/storage/pkg/mount" - "github.com/sirupsen/logrus" -) - -// EnsureRemoveAll wraps `os.RemoveAll` to check for specific errors that can -// often be remedied. -// Only use `EnsureRemoveAll` if you really want to make every effort to remove -// a directory. -// -// Because of the way `os.Remove` (and by extension `os.RemoveAll`) works, there -// can be a race between reading directory entries and then actually attempting -// to remove everything in the directory. -// These types of errors do not need to be returned since it's ok for the dir to -// be gone we can just retry the remove operation. -// -// This should not return a `os.ErrNotExist` kind of error under any circumstances -func EnsureRemoveAll(dir string) error { - notExistErr := make(map[string]bool) - - // track retries - exitOnErr := make(map[string]int) - maxRetry := 1000 - - // Attempt a simple remove all first, this avoids the more expensive - // RecursiveUnmount call if not needed. - if err := os.RemoveAll(dir); err == nil { - return nil - } - - // Attempt to unmount anything beneath this dir first - if err := mount.RecursiveUnmount(dir); err != nil { - logrus.Debugf("RecursiveUnmount on %s failed: %v", dir, err) - } - - for { - err := os.RemoveAll(dir) - if err == nil { - return nil - } - - // If the RemoveAll fails with a permission error, we - // may have immutable files so try to remove the - // immutable flag and redo the RemoveAll. - if errors.Is(err, syscall.EPERM) { - if err = resetFileFlags(dir); err != nil { - return fmt.Errorf("resetting file flags: %w", err) - } - err = os.RemoveAll(dir) - if err == nil { - return nil - } - } - - pe, ok := err.(*os.PathError) - if !ok { - return err - } - - if os.IsNotExist(err) { - if notExistErr[pe.Path] { - return err - } - notExistErr[pe.Path] = true - - // There is a race where some subdir can be removed but after the parent - // dir entries have been read. - // So the path could be from `os.Remove(subdir)` - // If the reported non-existent path is not the passed in `dir` we - // should just retry, but otherwise return with no error. - if pe.Path == dir { - return nil - } - continue - } - - if !IsEBUSY(pe.Err) { - return err - } - - if e := mount.Unmount(pe.Path); e != nil { - return fmt.Errorf("while removing %s: %w", dir, e) - } - - if exitOnErr[pe.Path] == maxRetry { - return err - } - exitOnErr[pe.Path]++ - time.Sleep(10 * time.Millisecond) - } -} diff --git a/vendor/github.com/containers/storage/pkg/system/rm_common.go b/vendor/github.com/containers/storage/pkg/system/rm_common.go deleted file mode 100644 index db214c4cd0..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/rm_common.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build !freebsd - -package system - -// Reset file flags in a directory tree. This allows EnsureRemoveAll -// to delete trees which have the immutable flag set. 
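IsProcessAlive above leans on the kill(2) convention that signal 0 performs the existence and permission checks but delivers nothing: nil means the process exists, EPERM means it exists but belongs to someone else, ESRCH means it is gone. The probe in isolation, checking our own PID so the example always succeeds:

```go
//go:build unix

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	pid := os.Getpid()

	// Signal 0: no signal is sent, only the checks are performed.
	err := unix.Kill(pid, 0)
	alive := err == nil || err == unix.EPERM
	fmt.Println("alive:", alive) // true
}
```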
-func resetFileFlags(dir string) error { - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/rm_freebsd.go b/vendor/github.com/containers/storage/pkg/system/rm_freebsd.go deleted file mode 100644 index 39a5de7aa4..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/rm_freebsd.go +++ /dev/null @@ -1,17 +0,0 @@ -package system - -import ( - "io/fs" - "path/filepath" -) - -// Reset file flags in a directory tree. This allows EnsureRemoveAll -// to delete trees which have the immutable flag set. -func resetFileFlags(dir string) error { - return filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error { - if err := Lchflags(path, 0); err != nil { - return err - } - return nil - }) -} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_common.go b/vendor/github.com/containers/storage/pkg/system/stat_common.go deleted file mode 100644 index 1d57b7f401..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/stat_common.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build !freebsd - -package system - -type platformStatT struct{} - -// Flags return file flags if supported or zero otherwise -func (s StatT) Flags() uint32 { - _ = s.platformStatT // Silence warnings that StatT.platformStatT is unused (on these platforms) - return 0 -} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_darwin.go b/vendor/github.com/containers/storage/pkg/system/stat_darwin.go deleted file mode 100644 index 57850a883f..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/stat_darwin.go +++ /dev/null @@ -1,15 +0,0 @@ -package system - -import "syscall" - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{ - size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtimespec, - }, nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go b/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go deleted file mode 100644 index 4b95073a3f..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/stat_freebsd.go +++ /dev/null @@ -1,28 +0,0 @@ -package system - -import "syscall" - -type platformStatT struct { - flags uint32 -} - -// Flags return file flags if supported or zero otherwise -func (s StatT) Flags() uint32 { - return s.flags -} - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - st := &StatT{ - size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtimespec, - dev: s.Dev, - } - st.flags = s.Flags - st.dev = s.Dev - return st, nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_linux.go b/vendor/github.com/containers/storage/pkg/system/stat_linux.go deleted file mode 100644 index 0dee88d1b8..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/stat_linux.go +++ /dev/null @@ -1,22 +0,0 @@ -package system - -import "syscall" - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{ - size: s.Size, - mode: s.Mode, - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), //nolint:unconvert - mtim: s.Mtim, - dev: uint64(s.Dev), //nolint:unconvert - }, nil -} - -// FromStatT converts a syscall.Stat_t type to a system.Stat_t type -// This is exposed on Linux as pkg/archive/changes uses it. 
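Each fromStatT variant above copies the platform's raw syscall.Stat_t into the portable StatT so callers never touch the platform struct directly. Reading the same raw fields on Linux, with /etc/hostname standing in for any existing path:

```go
//go:build linux

package main

import (
	"fmt"
	"syscall"
)

func main() {
	var st syscall.Stat_t
	if err := syscall.Lstat("/etc/hostname", &st); err != nil {
		panic(err)
	}
	// The fields StatT exposes as UID(), GID(), Mode(), Size().
	fmt.Printf("uid=%d gid=%d mode=%o size=%d\n",
		st.Uid, st.Gid, st.Mode&0o7777, st.Size)
}
```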
-func FromStatT(s *syscall.Stat_t) (*StatT, error) { - return fromStatT(s) -} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_netbsd.go b/vendor/github.com/containers/storage/pkg/system/stat_netbsd.go deleted file mode 100644 index 715f05b938..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/stat_netbsd.go +++ /dev/null @@ -1,13 +0,0 @@ -package system - -import "syscall" - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go b/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go deleted file mode 100644 index a413e17148..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/stat_openbsd.go +++ /dev/null @@ -1,15 +0,0 @@ -package system - -import "syscall" - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{ - size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtim, - }, nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_solaris.go b/vendor/github.com/containers/storage/pkg/system/stat_solaris.go deleted file mode 100644 index a413e17148..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/stat_solaris.go +++ /dev/null @@ -1,15 +0,0 @@ -package system - -import "syscall" - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{ - size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtim, - }, nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_unix.go b/vendor/github.com/containers/storage/pkg/system/stat_unix.go deleted file mode 100644 index ffe45f32da..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/stat_unix.go +++ /dev/null @@ -1,87 +0,0 @@ -//go:build !windows - -package system - -import ( - "os" - "strconv" - "syscall" - - "golang.org/x/sys/unix" -) - -// StatT type contains status of a file. It contains metadata -// like permission, owner, group, size, etc about a file. -type StatT struct { - mode uint32 - uid uint32 - gid uint32 - rdev uint64 - size int64 - mtim syscall.Timespec - dev uint64 - platformStatT -} - -// Mode returns file's permission mode. -func (s StatT) Mode() uint32 { - return s.mode -} - -// UID returns file's user id of owner. -func (s StatT) UID() uint32 { - return s.uid -} - -// GID returns file's group id of owner. -func (s StatT) GID() uint32 { - return s.gid -} - -// Rdev returns file's device ID (if it's special file). -func (s StatT) Rdev() uint64 { - return s.rdev -} - -// Size returns file's size. -func (s StatT) Size() int64 { - return s.size -} - -// Mtim returns file's last modification time. -func (s StatT) Mtim() syscall.Timespec { - return s.mtim -} - -// Dev returns a unique identifier for owning filesystem -func (s StatT) Dev() uint64 { - return s.dev -} - -func (s StatT) IsDir() bool { - return (s.mode & unix.S_IFDIR) != 0 -} - -// Stat takes a path to a file and returns -// a system.StatT type pertaining to that file. 
-// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Stat(path, s); err != nil { - return nil, &os.PathError{Op: "Stat", Path: path, Err: err} - } - return fromStatT(s) -} - -// Fstat takes an open file descriptor and returns -// a system.StatT type pertaining to that file. -// -// Throws an error if the file descriptor is invalid -func Fstat(fd int) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Fstat(fd, s); err != nil { - return nil, &os.PathError{Op: "Fstat", Path: strconv.Itoa(fd), Err: err} - } - return fromStatT(s) -} diff --git a/vendor/github.com/containers/storage/pkg/system/stat_windows.go b/vendor/github.com/containers/storage/pkg/system/stat_windows.go deleted file mode 100644 index 828be20882..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/stat_windows.go +++ /dev/null @@ -1,74 +0,0 @@ -package system - -import ( - "os" - "time" -) - -// StatT type contains status of a file. It contains metadata -// like permission, size, etc about a file. -type StatT struct { - mode os.FileMode - size int64 - mtim time.Time - platformStatT -} - -// Size returns file's size. -func (s StatT) Size() int64 { - return s.size -} - -// Mode returns file's permission mode. -func (s StatT) Mode() os.FileMode { - return os.FileMode(s.mode) -} - -// Mtim returns file's last modification time. -func (s StatT) Mtim() time.Time { - return time.Time(s.mtim) -} - -// UID returns file's user id of owner. -// -// on windows this is always 0 because there is no concept of UID -func (s StatT) UID() uint32 { - return 0 -} - -// GID returns file's group id of owner. -// -// on windows this is always 0 because there is no concept of GID -func (s StatT) GID() uint32 { - return 0 -} - -// Dev returns a unique identifier for owning filesystem -func (s StatT) Dev() uint64 { - return 0 -} - -func (s StatT) IsDir() bool { - return s.Mode().IsDir() -} - -// Stat takes a path to a file and returns -// a system.StatT type pertaining to that file. -// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - fi, err := os.Stat(path) - if err != nil { - return nil, err - } - return fromStatT(&fi) -} - -// fromStatT converts a os.FileInfo type to a system.StatT type -func fromStatT(fi *os.FileInfo) (*StatT, error) { - return &StatT{ - size: (*fi).Size(), - mode: (*fi).Mode(), - mtim: (*fi).ModTime(), - }, nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/syscall_unix.go b/vendor/github.com/containers/storage/pkg/system/syscall_unix.go deleted file mode 100644 index d1b41f34da..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/syscall_unix.go +++ /dev/null @@ -1,26 +0,0 @@ -//go:build !windows - -package system - -import ( - "errors" - - "golang.org/x/sys/unix" -) - -// Unmount is a platform-specific helper function to call -// the unmount syscall. -func Unmount(dest string) error { - return unix.Unmount(dest, 0) -} - -// CommandLineToArgv should not be used on Unix. -// It simply returns commandLine in the only element in the returned array. -func CommandLineToArgv(commandLine string) ([]string, error) { - return []string{commandLine}, nil -} - -// IsEBUSY checks if the specified error is EBUSY. 
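IsEBUSY below uses errors.Is rather than a direct comparison because callers usually see the errno wrapped in an *os.PathError, just as the Stat and Fstat helpers above wrap theirs. A small demonstration:

```go
//go:build unix

package main

import (
	"errors"
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	wrapped := &os.PathError{Op: "unmount", Path: "/mnt/x", Err: unix.EBUSY}

	// os.PathError unwraps to its Err field, so errors.Is still finds
	// the raw errno even though wrapped is not equal to unix.EBUSY itself.
	fmt.Println(errors.Is(wrapped, unix.EBUSY)) // true
}
```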
-func IsEBUSY(err error) bool { - return errors.Is(err, unix.EBUSY) -} diff --git a/vendor/github.com/containers/storage/pkg/system/syscall_windows.go b/vendor/github.com/containers/storage/pkg/system/syscall_windows.go deleted file mode 100644 index f4d8692cdb..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/syscall_windows.go +++ /dev/null @@ -1,127 +0,0 @@ -package system - -import ( - "unsafe" - - "github.com/sirupsen/logrus" - "golang.org/x/sys/windows" -) - -var ( - ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") - procGetVersionExW = modkernel32.NewProc("GetVersionExW") - procGetProductInfo = modkernel32.NewProc("GetProductInfo") -) - -// OSVersion is a wrapper for Windows version information -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx -type OSVersion struct { - Version uint32 - MajorVersion uint8 - MinorVersion uint8 - Build uint16 -} - -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx -type osVersionInfoEx struct { - OSVersionInfoSize uint32 - MajorVersion uint32 - MinorVersion uint32 - BuildNumber uint32 - PlatformID uint32 - CSDVersion [128]uint16 - ServicePackMajor uint16 - ServicePackMinor uint16 - SuiteMask uint16 - ProductType byte - Reserve byte -} - -// GetOSVersion gets the operating system version on Windows. Note that -// docker.exe must be manifested to get the correct version information. -func GetOSVersion() OSVersion { - var err error - osv := OSVersion{} - osv.Version, err = windows.GetVersion() - if err != nil { - // GetVersion never fails. - panic(err) - } - osv.MajorVersion = uint8(osv.Version & 0xFF) - osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) - osv.Build = uint16(osv.Version >> 16) - return osv -} - -// IsWindowsClient returns true if the SKU is client -// @engine maintainers - this function should not be removed or modified as it -// is used to enforce licensing restrictions on Windows. -func IsWindowsClient() bool { - osviex := &osVersionInfoEx{OSVersionInfoSize: 284} - r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex))) - if r1 == 0 { - logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err) - return false - } - const verNTWorkstation = 0x00000001 - return osviex.ProductType == verNTWorkstation -} - -// IsIoTCore returns true if the currently running image is based off of -// Windows 10 IoT Core. -// @engine maintainers - this function should not be removed or modified as it -// is used to enforce licensing restrictions on Windows. -func IsIoTCore() bool { - var returnedProductType uint32 - r1, _, err := procGetProductInfo.Call(6, 1, 0, 0, uintptr(unsafe.Pointer(&returnedProductType))) - if r1 == 0 { - logrus.Warnf("GetProductInfo failed - assuming this is not IoT: %v", err) - return false - } - const productIoTUAP = 0x0000007B - const productIoTUAPCommercial = 0x00000083 - return returnedProductType == productIoTUAP || returnedProductType == productIoTUAPCommercial -} - -// Unmount is a platform-specific helper function to call -// the unmount syscall. Not supported on Windows -func Unmount(dest string) error { - return nil -} - -// CommandLineToArgv wraps the Windows syscall to turn a commandline into an argument array. 
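GetOSVersion above unpacks the uint32 that GetVersion returns: major in the low byte, minor in the next byte, build number in the high word. The arithmetic on a concrete value (0x23F00206 corresponds to Windows 8, version 6.2 build 9200):

```go
package main

import "fmt"

func main() {
	v := uint32(0x23F00206)

	major := uint8(v & 0xFF)      // 0x06 = 6
	minor := uint8(v >> 8 & 0xFF) // 0x02 = 2
	build := uint16(v >> 16)      // 0x23F0 = 9200
	fmt.Printf("%d.%d build %d\n", major, minor, build)
}
```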
-func CommandLineToArgv(commandLine string) ([]string, error) { - var argc int32 - - argsPtr, err := windows.UTF16PtrFromString(commandLine) - if err != nil { - return nil, err - } - - argv, err := windows.CommandLineToArgv(argsPtr, &argc) - if err != nil { - return nil, err - } - defer windows.LocalFree(windows.Handle(uintptr(unsafe.Pointer(argv)))) - - newArgs := make([]string, argc) - for i, v := range (*argv)[:argc] { - newArgs[i] = string(windows.UTF16ToString((*v)[:])) - } - - return newArgs, nil -} - -// HasWin32KSupport determines whether containers that depend on win32k can -// run on this machine. Win32k is the driver used to implement windowing. -func HasWin32KSupport() bool { - // For now, check for ntuser API support on the host. In the future, a host - // may support win32k in containers even if the host does not support ntuser - // APIs. - return ntuserApiset.Load() == nil -} - -// IsEBUSY checks if the specified error is EBUSY. -func IsEBUSY(err error) bool { - return false -} diff --git a/vendor/github.com/containers/storage/pkg/system/umask.go b/vendor/github.com/containers/storage/pkg/system/umask.go deleted file mode 100644 index 9b02a18873..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/umask.go +++ /dev/null @@ -1,13 +0,0 @@ -//go:build !windows - -package system - -import ( - "golang.org/x/sys/unix" -) - -// Umask sets current process's file mode creation mask to newmask -// and returns oldmask. -func Umask(newmask int) (oldmask int, err error) { - return unix.Umask(newmask), nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/umask_windows.go b/vendor/github.com/containers/storage/pkg/system/umask_windows.go deleted file mode 100644 index c0b69ab1bf..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/umask_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -//go:build windows - -package system - -// Umask is not supported on the windows platform. -func Umask(newmask int) (oldmask int, err error) { - // should not be called on cli code path - return 0, ErrNotSupportedPlatform -} diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go b/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go deleted file mode 100644 index edc588a63f..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/utimes_freebsd.go +++ /dev/null @@ -1,25 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/unix" -) - -// LUtimesNano is used to change access and modification time of the specified path. -// It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm. -func LUtimesNano(path string, ts []syscall.Timespec) error { - atFdCwd := unix.AT_FDCWD - - var _path *byte - _path, err := unix.BytePtrFromString(path) - if err != nil { - return err - } - if _, _, err := unix.Syscall6(unix.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), unix.AT_SYMLINK_NOFOLLOW, 0, 0); err != 0 && err != unix.ENOSYS { - return err - } - - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_linux.go b/vendor/github.com/containers/storage/pkg/system/utimes_linux.go deleted file mode 100644 index edc588a63f..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/utimes_linux.go +++ /dev/null @@ -1,25 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" - - "golang.org/x/sys/unix" -) - -// LUtimesNano is used to change access and modification time of the specified path. 
-// It's used for symbol link file because unix.UtimesNano doesn't support a NOFOLLOW flag atm. -func LUtimesNano(path string, ts []syscall.Timespec) error { - atFdCwd := unix.AT_FDCWD - - var _path *byte - _path, err := unix.BytePtrFromString(path) - if err != nil { - return err - } - if _, _, err := unix.Syscall6(unix.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), unix.AT_SYMLINK_NOFOLLOW, 0, 0); err != 0 && err != unix.ENOSYS { - return err - } - - return nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go b/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go deleted file mode 100644 index b6c36339df..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/utimes_unsupported.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build !linux && !freebsd - -package system - -import "syscall" - -// LUtimesNano is only supported on linux and freebsd. -func LUtimesNano(path string, ts []syscall.Timespec) error { - return ErrNotSupportedPlatform -} diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go b/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go deleted file mode 100644 index 27ada2083e..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/xattrs_darwin.go +++ /dev/null @@ -1,84 +0,0 @@ -package system - -import ( - "bytes" - "os" - - "golang.org/x/sys/unix" -) - -const ( - // Value is larger than the maximum size allowed - E2BIG unix.Errno = unix.E2BIG - - // Operation not supported - ENOTSUP unix.Errno = unix.ENOTSUP -) - -// Lgetxattr retrieves the value of the extended attribute identified by attr -// and associated with the given path in the file system. -// Returns a []byte slice if the xattr is set and nil otherwise. -func Lgetxattr(path string, attr string) ([]byte, error) { - // Start with a 128 length byte array - dest := make([]byte, 128) - sz, errno := unix.Lgetxattr(path, attr, dest) - - for errno == unix.ERANGE { - // Buffer too small, use zero-sized buffer to get the actual size - sz, errno = unix.Lgetxattr(path, attr, []byte{}) - if errno != nil { - return nil, &os.PathError{Op: "lgetxattr", Path: path, Err: errno} - } - dest = make([]byte, sz) - sz, errno = unix.Lgetxattr(path, attr, dest) - } - - switch { - case errno == unix.ENOATTR: - return nil, nil - case errno != nil: - return nil, &os.PathError{Op: "lgetxattr", Path: path, Err: errno} - } - - return dest[:sz], nil -} - -// Lsetxattr sets the value of the extended attribute identified by attr -// and associated with the given path in the file system. -func Lsetxattr(path string, attr string, data []byte, flags int) error { - if err := unix.Lsetxattr(path, attr, data, flags); err != nil { - return &os.PathError{Op: "lsetxattr", Path: path, Err: err} - } - - return nil -} - -// Llistxattr lists extended attributes associated with the given path -// in the file system. 
-func Llistxattr(path string) ([]string, error) { - dest := make([]byte, 128) - sz, errno := unix.Llistxattr(path, dest) - - for errno == unix.ERANGE { - // Buffer too small, use zero-sized buffer to get the actual size - sz, errno = unix.Llistxattr(path, []byte{}) - if errno != nil { - return nil, &os.PathError{Op: "llistxattr", Path: path, Err: errno} - } - - dest = make([]byte, sz) - sz, errno = unix.Llistxattr(path, dest) - } - if errno != nil { - return nil, &os.PathError{Op: "llistxattr", Path: path, Err: errno} - } - - var attrs []string - for _, token := range bytes.Split(dest[:sz], []byte{0}) { - if len(token) > 0 { - attrs = append(attrs, string(token)) - } - } - - return attrs, nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_freebsd.go b/vendor/github.com/containers/storage/pkg/system/xattrs_freebsd.go deleted file mode 100644 index 5d653976e5..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/xattrs_freebsd.go +++ /dev/null @@ -1,85 +0,0 @@ -package system - -import ( - "strings" - - "golang.org/x/sys/unix" -) - -const ( - // Value is larger than the maximum size allowed - E2BIG unix.Errno = unix.E2BIG - - // Operation not supported - ENOTSUP unix.Errno = unix.ENOTSUP - - // Value is too small or too large for maximum size allowed - EOVERFLOW unix.Errno = unix.EOVERFLOW -) - -var ( - namespaceMap = map[string]int{ - "user": EXTATTR_NAMESPACE_USER, - "system": EXTATTR_NAMESPACE_SYSTEM, - } -) - -func xattrToExtattr(xattr string) (namespace int, extattr string, err error) { - namespaceName, extattr, found := strings.Cut(xattr, ".") - if !found { - return -1, "", ENOTSUP - } - - namespace, ok := namespaceMap[namespaceName] - if !ok { - return -1, "", ENOTSUP - } - return namespace, extattr, nil -} - -// Lgetxattr retrieves the value of the extended attribute identified by attr -// and associated with the given path in the file system. -// Returns a []byte slice if the xattr is set and nil otherwise. -func Lgetxattr(path string, attr string) ([]byte, error) { - namespace, extattr, err := xattrToExtattr(attr) - if err != nil { - return nil, err - } - return ExtattrGetLink(path, namespace, extattr) -} - -// Lsetxattr sets the value of the extended attribute identified by attr -// and associated with the given path in the file system. -func Lsetxattr(path string, attr string, value []byte, flags int) error { - if flags != 0 { - // FIXME: Flags are not supported on FreeBSD, but we can implement - // them mimicking the behavior of the Linux implementation. - // See lsetxattr(2) on Linux for more information. - return ENOTSUP - } - - namespace, extattr, err := xattrToExtattr(attr) - if err != nil { - return err - } - return ExtattrSetLink(path, namespace, extattr, value) -} - -// Llistxattr lists extended attributes associated with the given path -// in the file system. 
-func Llistxattr(path string) ([]string, error) { - attrs := []string{} - - for namespaceName, namespace := range namespaceMap { - namespaceAttrs, err := ExtattrListLink(path, namespace) - if err != nil { - return nil, err - } - - for _, attr := range namespaceAttrs { - attrs = append(attrs, namespaceName+"."+attr) - } - } - - return attrs, nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go b/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go deleted file mode 100644 index 12462cca33..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/xattrs_linux.go +++ /dev/null @@ -1,87 +0,0 @@ -package system - -import ( - "bytes" - "os" - - "golang.org/x/sys/unix" -) - -const ( - // Value is larger than the maximum size allowed - E2BIG unix.Errno = unix.E2BIG - - // Operation not supported - ENOTSUP unix.Errno = unix.ENOTSUP - - // Value is too small or too large for maximum size allowed - EOVERFLOW unix.Errno = unix.EOVERFLOW -) - -// Lgetxattr retrieves the value of the extended attribute identified by attr -// and associated with the given path in the file system. -// Returns a []byte slice if the xattr is set and nil otherwise. -func Lgetxattr(path string, attr string) ([]byte, error) { - // Start with a 128 length byte array - dest := make([]byte, 128) - sz, errno := unix.Lgetxattr(path, attr, dest) - - for errno == unix.ERANGE { - // Buffer too small, use zero-sized buffer to get the actual size - sz, errno = unix.Lgetxattr(path, attr, []byte{}) - if errno != nil { - return nil, &os.PathError{Op: "lgetxattr", Path: path, Err: errno} - } - dest = make([]byte, sz) - sz, errno = unix.Lgetxattr(path, attr, dest) - } - - switch { - case errno == unix.ENODATA: - return nil, nil - case errno != nil: - return nil, &os.PathError{Op: "lgetxattr", Path: path, Err: errno} - } - - return dest[:sz], nil -} - -// Lsetxattr sets the value of the extended attribute identified by attr -// and associated with the given path in the file system. -func Lsetxattr(path string, attr string, data []byte, flags int) error { - if err := unix.Lsetxattr(path, attr, data, flags); err != nil { - return &os.PathError{Op: "lsetxattr", Path: path, Err: err} - } - - return nil -} - -// Llistxattr lists extended attributes associated with the given path -// in the file system. 
-func Llistxattr(path string) ([]string, error) { - dest := make([]byte, 128) - sz, errno := unix.Llistxattr(path, dest) - - for errno == unix.ERANGE { - // Buffer too small, use zero-sized buffer to get the actual size - sz, errno = unix.Llistxattr(path, []byte{}) - if errno != nil { - return nil, &os.PathError{Op: "llistxattr", Path: path, Err: errno} - } - - dest = make([]byte, sz) - sz, errno = unix.Llistxattr(path, dest) - } - if errno != nil { - return nil, &os.PathError{Op: "llistxattr", Path: path, Err: errno} - } - - var attrs []string - for _, token := range bytes.Split(dest[:sz], []byte{0}) { - if len(token) > 0 { - attrs = append(attrs, string(token)) - } - } - - return attrs, nil -} diff --git a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go b/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go deleted file mode 100644 index 66bf5858f6..0000000000 --- a/vendor/github.com/containers/storage/pkg/system/xattrs_unsupported.go +++ /dev/null @@ -1,31 +0,0 @@ -//go:build !linux && !darwin && !freebsd - -package system - -import "syscall" - -const ( - // Value is larger than the maximum size allowed - E2BIG syscall.Errno = syscall.Errno(0) - - // Operation not supported - ENOTSUP syscall.Errno = syscall.Errno(0) - - // Value is too small or too large for maximum size allowed - EOVERFLOW syscall.Errno = syscall.Errno(0) -) - -// Lgetxattr is not supported on platforms other than linux. -func Lgetxattr(path string, attr string) ([]byte, error) { - return nil, ErrNotSupportedPlatform -} - -// Lsetxattr is not supported on platforms other than linux. -func Lsetxattr(path string, attr string, data []byte, flags int) error { - return ErrNotSupportedPlatform -} - -// Llistxattr is not supported on platforms other than linux. 
-func Llistxattr(path string) ([]string, error) { - return nil, ErrNotSupportedPlatform -} diff --git a/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go b/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go deleted file mode 100644 index 14aaeddcf9..0000000000 --- a/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_cgo.go +++ /dev/null @@ -1,22 +0,0 @@ -//go:build linux && cgo - -package unshare - -import ( - "unsafe" -) - -/* -#cgo remoteclient CFLAGS: -Wall -Werror -#include -*/ -import "C" - -func getenv(name string) string { - cName := C.CString(name) - defer C.free(unsafe.Pointer(cName)) - - value := C.GoString(C.getenv(cName)) - - return value -} diff --git a/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go b/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go deleted file mode 100644 index f970935b57..0000000000 --- a/vendor/github.com/containers/storage/pkg/unshare/getenv_linux_nocgo.go +++ /dev/null @@ -1,11 +0,0 @@ -//go:build linux && !cgo - -package unshare - -import ( - "os" -) - -func getenv(name string) string { - return os.Getenv(name) -} diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare.c b/vendor/github.com/containers/storage/pkg/unshare/unshare.c deleted file mode 100644 index a2800654f9..0000000000 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare.c +++ /dev/null @@ -1,379 +0,0 @@ -#if !defined(UNSHARE_NO_CODE_AT_ALL) && defined(__linux__) - -#define _GNU_SOURCE -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -/* Open Source projects like conda-forge, want to package podman and are based - off of centos:6, Conda-force has minimal libc requirements and is lacking - the memfd.h file, so we use mmam.h -*/ -#ifndef MFD_ALLOW_SEALING -#define MFD_ALLOW_SEALING 2U -#endif -#ifndef MFD_CLOEXEC -#define MFD_CLOEXEC 1U -#endif - -#ifndef F_LINUX_SPECIFIC_BASE -#define F_LINUX_SPECIFIC_BASE 1024 -#endif -#ifndef F_ADD_SEALS -#define F_ADD_SEALS (F_LINUX_SPECIFIC_BASE + 9) -#define F_GET_SEALS (F_LINUX_SPECIFIC_BASE + 10) -#endif -#ifndef F_SEAL_SEAL -#define F_SEAL_SEAL 0x0001LU -#endif -#ifndef F_SEAL_SHRINK -#define F_SEAL_SHRINK 0x0002LU -#endif -#ifndef F_SEAL_GROW -#define F_SEAL_GROW 0x0004LU -#endif -#ifndef F_SEAL_WRITE -#define F_SEAL_WRITE 0x0008LU -#endif - -#define BUFSTEP 1024 - -static const char *_max_user_namespaces = "/proc/sys/user/max_user_namespaces"; -static const char *_unprivileged_user_namespaces = "/proc/sys/kernel/unprivileged_userns_clone"; - -static int _containers_unshare_parse_envint(const char *envname) { - char *p, *q; - long l; - - p = getenv(envname); - if (p == NULL) { - return -1; - } - q = NULL; - l = strtol(p, &q, 10); - if ((q == NULL) || (*q != '\0')) { - fprintf(stderr, "Error parsing \"%s\"=\"%s\"!\n", envname, p); - _exit(1); - } - unsetenv(envname); - return l; -} - -static void _check_proc_sys_file(const char *path) -{ - FILE *fp; - char buf[32]; - size_t n_read; - long r; - - fp = fopen(path, "r"); - if (fp == NULL) { - if (errno != ENOENT) - fprintf(stderr, "Error reading %s: %m\n", _max_user_namespaces); - } else { - memset(buf, 0, sizeof(buf)); - n_read = fread(buf, 1, sizeof(buf) - 1, fp); - if (n_read > 0) { - r = atoi(buf); - if (r == 0) { - fprintf(stderr, "User namespaces are not enabled in %s.\n", path); - } - } else { - fprintf(stderr, "Error reading %s: no contents, 
should contain a number greater than 0.\n", path); - } - fclose(fp); - } -} - -static char **parse_proc_stringlist(const char *list) { - int fd, n, i, n_strings; - char *buf, *new_buf, **ret; - size_t size, new_size, used; - - fd = open(list, O_RDONLY); - if (fd == -1) { - return NULL; - } - buf = NULL; - size = 0; - used = 0; - for (;;) { - new_size = used + BUFSTEP; - new_buf = realloc(buf, new_size); - if (new_buf == NULL) { - free(buf); - fprintf(stderr, "realloc(%ld): out of memory\n", (long)(size + BUFSTEP)); - return NULL; - } - buf = new_buf; - size = new_size; - memset(buf + used, '\0', size - used); - n = read(fd, buf + used, size - used - 1); - if (n < 0) { - fprintf(stderr, "read(): %m\n"); - return NULL; - } - if (n == 0) { - break; - } - used += n; - } - close(fd); - n_strings = 0; - for (n = 0; n < used; n++) { - if ((n == 0) || (buf[n-1] == '\0')) { - n_strings++; - } - } - ret = calloc(n_strings + 1, sizeof(char *)); - if (ret == NULL) { - fprintf(stderr, "calloc(): out of memory\n"); - return NULL; - } - i = 0; - for (n = 0; n < used; n++) { - if ((n == 0) || (buf[n-1] == '\0')) { - ret[i++] = &buf[n]; - } - } - ret[i] = NULL; - return ret; -} - -/* - * Taken from the runc cloned_binary.c file - * Copyright (C) 2019 Aleksa Sarai - * Copyright (C) 2019 SUSE LLC - * - * This work is dual licensed under the following licenses. You may use, - * redistribute, and/or modify the work under the conditions of either (or - * both) licenses. - * - * === Apache-2.0 === - */ -static int try_bindfd(void) -{ - int fd, ret = -1; - char src[PATH_MAX] = {0}; - char template[64] = {0}; - - strncpy(template, "/tmp/containers.XXXXXX", sizeof(template) - 1); - - /* - * We need somewhere to mount it, mounting anything over /proc/self is a - * BAD idea on the host -- even if we do it temporarily. - */ - fd = mkstemp(template); - if (fd < 0) - return ret; - close(fd); - - ret = -EPERM; - - if (readlink("/proc/self/exe", src, sizeof (src) - 1) < 0) - goto out; - - if (mount(src, template, NULL, MS_BIND, NULL) < 0) - goto out; - if (mount(NULL, template, NULL, MS_REMOUNT | MS_BIND | MS_RDONLY, NULL) < 0) - goto out_umount; - - /* Get read-only handle that we're sure can't be made read-write. */ - ret = open(template, O_PATH | O_CLOEXEC); - -out_umount: - /* - * Make sure the MNT_DETACH works, otherwise we could get remounted - * read-write and that would be quite bad (the fd would be made read-write - * too, invalidating the protection). - */ - if (umount2(template, MNT_DETACH) < 0) { - if (ret >= 0) - close(ret); - ret = -ENOTRECOVERABLE; - } - -out: - /* - * We don't care about unlink errors, the worst that happens is that - * there's an empty file left around in STATEDIR. 
- */ - unlink(template); - return ret; -} - -static int copy_self_proc_exe(char **argv) { - char *exename; - int fd, mmfd, n_read, n_written; - struct stat st; - char buf[2048]; - - fd = open("/proc/self/exe", O_RDONLY | O_CLOEXEC); - if (fd == -1) { - fprintf(stderr, "open(\"/proc/self/exe\"): %m\n"); - return -1; - } - if (fstat(fd, &st) == -1) { - fprintf(stderr, "fstat(\"/proc/self/exe\"): %m\n"); - close(fd); - return -1; - } - exename = basename(argv[0]); - mmfd = syscall(SYS_memfd_create, exename, (long) MFD_ALLOW_SEALING | MFD_CLOEXEC); - if (mmfd == -1) { - fprintf(stderr, "memfd_create(): %m\n"); - goto close_fd; - } - for (;;) { - n_read = read(fd, buf, sizeof(buf)); - if (n_read < 0) { - fprintf(stderr, "read(\"/proc/self/exe\"): %m\n"); - return -1; - } - if (n_read == 0) { - break; - } - n_written = write(mmfd, buf, n_read); - if (n_written < 0) { - fprintf(stderr, "write(anonfd): %m\n"); - goto close_fd; - } - if (n_written != n_read) { - fprintf(stderr, "write(anonfd): short write (%d != %d)\n", n_written, n_read); - goto close_fd; - } - } - close(fd); - if (fcntl(mmfd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_WRITE | F_SEAL_SEAL) == -1) { - fprintf(stderr, "Close_Fd sealing memfd copy: %m\n"); - goto close_mmfd; - } - - return mmfd; - -close_fd: - close(fd); -close_mmfd: - close(mmfd); - return -1; -} -static int containers_reexec(int flags) { - char **argv; - int fd = -1; - - argv = parse_proc_stringlist("/proc/self/cmdline"); - if (argv == NULL) { - return -1; - } - - if (flags & CLONE_NEWNS) - fd = try_bindfd(); - if (fd < 0) - fd = copy_self_proc_exe(argv); - if (fd < 0) - return fd; - - if (fexecve(fd, argv, environ) == -1) { - close(fd); - fprintf(stderr, "Error during reexec(...): %m\n"); - return -1; - } - close(fd); - return 0; -} - -void _containers_unshare(void) -{ - int flags, pidfd, continuefd, n, pgrp, sid, ctty; - char buf[2048]; - - flags = _containers_unshare_parse_envint("_Containers-unshare"); - if (flags == -1) { - return; - } - if ((flags & CLONE_NEWUSER) != 0) { - if (unshare(CLONE_NEWUSER) == -1) { - fprintf(stderr, "Error during unshare(CLONE_NEWUSER): %m\n"); - _check_proc_sys_file (_max_user_namespaces); - _check_proc_sys_file (_unprivileged_user_namespaces); - _exit(1); - } - } - pidfd = _containers_unshare_parse_envint("_Containers-pid-pipe"); - if (pidfd != -1) { - snprintf(buf, sizeof(buf), "%llu", (unsigned long long) getpid()); - size_t size = write(pidfd, buf, strlen(buf)); - if (size != strlen(buf)) { - fprintf(stderr, "Error writing PID to pipe on fd %d: %m\n", pidfd); - _exit(1); - } - close(pidfd); - } - continuefd = _containers_unshare_parse_envint("_Containers-continue-pipe"); - if (continuefd != -1) { - n = read(continuefd, buf, sizeof(buf)); - if (n > 0) { - fprintf(stderr, "Error: %.*s\n", n, buf); - _exit(1); - } - close(continuefd); - } - sid = _containers_unshare_parse_envint("_Containers-setsid"); - if (sid == 1) { - if (setsid() == -1) { - fprintf(stderr, "Error during setsid: %m\n"); - _exit(1); - } - } - pgrp = _containers_unshare_parse_envint("_Containers-setpgrp"); - if (pgrp == 1) { - if (setpgrp() == -1) { - fprintf(stderr, "Error during setpgrp: %m\n"); - _exit(1); - } - } - ctty = _containers_unshare_parse_envint("_Containers-ctty"); - if (ctty != -1) { - if (ioctl(ctty, TIOCSCTTY, 0) == -1) { - fprintf(stderr, "Error while setting controlling terminal to %d: %m\n", ctty); - _exit(1); - } - } - if ((flags & CLONE_NEWUSER) != 0) { - if (setresgid(0, 0, 0) != 0) { - fprintf(stderr, "Error during setresgid(0): 
%m\n"); - _exit(1); - } - if (setresuid(0, 0, 0) != 0) { - fprintf(stderr, "Error during setresuid(0): %m\n"); - _exit(1); - } - } - if ((flags & ~CLONE_NEWUSER) != 0) { - if (unshare(flags & ~CLONE_NEWUSER) == -1) { - fprintf(stderr, "Error during unshare(...): %m\n"); - _exit(1); - } - } - if (containers_reexec(flags) != 0) { - _exit(1); - } - return; -} - -#endif // !UNSHARE_NO_CODE_AT_ALL diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare.go b/vendor/github.com/containers/storage/pkg/unshare/unshare.go deleted file mode 100644 index 00f397f350..0000000000 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare.go +++ /dev/null @@ -1,32 +0,0 @@ -package unshare - -import ( - "fmt" - "os" - "os/user" - "sync" -) - -var ( - homeDirOnce sync.Once - homeDirErr error - homeDir string -) - -// HomeDir returns the home directory for the current user. -func HomeDir() (string, error) { - homeDirOnce.Do(func() { - home := os.Getenv("HOME") - if home == "" { - usr, err := user.LookupId(fmt.Sprintf("%d", GetRootlessUID())) - if err != nil { - homeDir, homeDirErr = "", fmt.Errorf("unable to resolve HOME directory: %w", err) - return - } - homeDir, homeDirErr = usr.HomeDir, nil - return - } - homeDir, homeDirErr = home, nil - }) - return homeDir, homeDirErr -} diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go deleted file mode 100644 index f575fba2e4..0000000000 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_cgo.go +++ /dev/null @@ -1,10 +0,0 @@ -//go:build (linux && cgo && !gccgo) || (freebsd && cgo) - -package unshare - -// #cgo CFLAGS: -Wall -// extern void _containers_unshare(void); -// static void __attribute__((constructor)) init(void) { -// _containers_unshare(); -// } -import "C" diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go deleted file mode 100644 index 5d0a7a683c..0000000000 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_darwin.go +++ /dev/null @@ -1,58 +0,0 @@ -//go:build darwin - -package unshare - -import ( - "os" - - "github.com/containers/storage/pkg/idtools" - "github.com/opencontainers/runtime-spec/specs-go" -) - -const ( - // UsernsEnvName is the environment variable, if set indicates in rootless mode - UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED" -) - -// IsRootless tells us if we are running in rootless mode -func IsRootless() bool { - return true -} - -// GetRootlessUID returns the UID of the user in the parent userNS -func GetRootlessUID() int { - return os.Getuid() -} - -// GetRootlessGID returns the GID of the user in the parent userNS -func GetRootlessGID() int { - return os.Getgid() -} - -// RootlessEnv returns the environment settings for the rootless containers -func RootlessEnv() []string { - return append(os.Environ(), UsernsEnvName+"=") -} - -// MaybeReexecUsingUserNamespace re-exec the process in a new namespace -func MaybeReexecUsingUserNamespace(evenForRoot bool) { -} - -// GetHostIDMappings reads mappings for the specified process (or the current -// process if pid is "self" or an empty string) from the kernel. -func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) { - return nil, nil, nil -} - -// ParseIDMappings parses mapping triples. 
-func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) { - uid, err := idtools.ParseIDMap(uidmap, "userns-uid-map") - if err != nil { - return nil, nil, err - } - gid, err := idtools.ParseIDMap(gidmap, "userns-gid-map") - if err != nil { - return nil, nil, err - } - return uid, gid, nil -} diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.c b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.c deleted file mode 100644 index 0b2f178869..0000000000 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.c +++ /dev/null @@ -1,76 +0,0 @@ -#if !defined(UNSHARE_NO_CODE_AT_ALL) && defined(__FreeBSD__) - - -#include -#include -#include -#include -#include -#include - -static int _containers_unshare_parse_envint(const char *envname) { - char *p, *q; - long l; - - p = getenv(envname); - if (p == NULL) { - return -1; - } - q = NULL; - l = strtol(p, &q, 10); - if ((q == NULL) || (*q != '\0')) { - fprintf(stderr, "Error parsing \"%s\"=\"%s\"!\n", envname, p); - _exit(1); - } - unsetenv(envname); - return l; -} - -void _containers_unshare(void) -{ - int pidfd, continuefd, n, pgrp, sid, ctty; - char buf[2048]; - - pidfd = _containers_unshare_parse_envint("_Containers-pid-pipe"); - if (pidfd != -1) { - snprintf(buf, sizeof(buf), "%llu", (unsigned long long) getpid()); - size_t size = write(pidfd, buf, strlen(buf)); - if (size != strlen(buf)) { - fprintf(stderr, "Error writing PID to pipe on fd %d: %m\n", pidfd); - _exit(1); - } - close(pidfd); - } - continuefd = _containers_unshare_parse_envint("_Containers-continue-pipe"); - if (continuefd != -1) { - n = read(continuefd, buf, sizeof(buf)); - if (n > 0) { - fprintf(stderr, "Error: %.*s\n", n, buf); - _exit(1); - } - close(continuefd); - } - sid = _containers_unshare_parse_envint("_Containers-setsid"); - if (sid == 1) { - if (setsid() == -1) { - fprintf(stderr, "Error during setsid: %m\n"); - _exit(1); - } - } - pgrp = _containers_unshare_parse_envint("_Containers-setpgrp"); - if (pgrp == 1) { - if (setpgrp(0, 0) == -1) { - fprintf(stderr, "Error during setpgrp: %m\n"); - _exit(1); - } - } - ctty = _containers_unshare_parse_envint("_Containers-ctty"); - if (ctty != -1) { - if (ioctl(ctty, TIOCSCTTY, 0) == -1) { - fprintf(stderr, "Error while setting controlling terminal to %d: %m\n", ctty); - _exit(1); - } - } -} - -#endif diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go deleted file mode 100644 index 37a87fa5bd..0000000000 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_freebsd.go +++ /dev/null @@ -1,178 +0,0 @@ -//go:build freebsd - -package unshare - -import ( - "bytes" - "errors" - "fmt" - "io" - "os" - "os/exec" - "runtime" - "strconv" - "syscall" - - "github.com/containers/storage/pkg/reexec" - "github.com/sirupsen/logrus" -) - -// Cmd wraps an exec.Cmd created by the reexec package in unshare(), -// and one day might handle setting ID maps and other related setting*s -// by triggering initialization code in the child. -type Cmd struct { - *exec.Cmd - Setsid bool - Setpgrp bool - Ctty *os.File - Hook func(pid int) error -} - -// Command creates a new Cmd which can be customized. -func Command(args ...string) *Cmd { - cmd := reexec.Command(args...) 
- return &Cmd{ - Cmd: cmd, - } -} - -func (c *Cmd) Start() error { - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - // Set environment variables to tell the child to synchronize its startup. - if c.Env == nil { - c.Env = os.Environ() - } - - // Create the pipe for reading the child's PID. - pidRead, pidWrite, err := os.Pipe() - if err != nil { - return fmt.Errorf("creating pid pipe: %w", err) - } - c.Env = append(c.Env, fmt.Sprintf("_Containers-pid-pipe=%d", len(c.ExtraFiles)+3)) - c.ExtraFiles = append(c.ExtraFiles, pidWrite) - - // Create the pipe for letting the child know to proceed. - continueRead, continueWrite, err := os.Pipe() - if err != nil { - pidRead.Close() - pidWrite.Close() - return fmt.Errorf("creating continue read/write pipe: %w", err) - } - c.Env = append(c.Env, fmt.Sprintf("_Containers-continue-pipe=%d", len(c.ExtraFiles)+3)) - c.ExtraFiles = append(c.ExtraFiles, continueRead) - - // Pass along other instructions. - if c.Setsid { - c.Env = append(c.Env, "_Containers-setsid=1") - } - if c.Setpgrp { - c.Env = append(c.Env, "_Containers-setpgrp=1") - } - if c.Ctty != nil { - c.Env = append(c.Env, fmt.Sprintf("_Containers-ctty=%d", len(c.ExtraFiles)+3)) - c.ExtraFiles = append(c.ExtraFiles, c.Ctty) - } - - // Make sure we clean up our pipes. - defer func() { - if pidRead != nil { - pidRead.Close() - } - if pidWrite != nil { - pidWrite.Close() - } - if continueRead != nil { - continueRead.Close() - } - if continueWrite != nil { - continueWrite.Close() - } - }() - - // Start the new process. - err = c.Cmd.Start() - if err != nil { - return err - } - - // Close the ends of the pipes that the parent doesn't need. - continueRead.Close() - continueRead = nil - pidWrite.Close() - pidWrite = nil - - // Read the child's PID from the pipe. - pidString := "" - b := new(bytes.Buffer) - if _, err := io.Copy(b, pidRead); err != nil { - return fmt.Errorf("reading child PID: %w", err) - } - pidString = b.String() - pid, err := strconv.Atoi(pidString) - if err != nil { - fmt.Fprintf(continueWrite, "error parsing PID %q: %v", pidString, err) - return fmt.Errorf("parsing PID %q: %w", pidString, err) - } - - // Run any additional setup that we want to do before the child starts running proper. - if c.Hook != nil { - if err = c.Hook(pid); err != nil { - fmt.Fprintf(continueWrite, "hook error: %v", err) - return err - } - } - - return nil -} - -func (c *Cmd) Run() error { - if err := c.Start(); err != nil { - return err - } - return c.Wait() -} - -func (c *Cmd) CombinedOutput() ([]byte, error) { - return nil, errors.New("unshare: CombinedOutput() not implemented") -} - -func (c *Cmd) Output() ([]byte, error) { - return nil, errors.New("unshare: Output() not implemented") -} - -type Runnable interface { - Run() error -} - -// ExecRunnable runs the specified unshare command, captures its exit status, -// and exits with the same status. 
-func ExecRunnable(cmd Runnable, cleanup func()) { - exit := func(status int) { - if cleanup != nil { - cleanup() - } - os.Exit(status) - } - if err := cmd.Run(); err != nil { - if exitError, ok := err.(*exec.ExitError); ok { - if exitError.ProcessState.Exited() { - if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok { - if waitStatus.Exited() { - logrus.Debugf("%v", exitError) - exit(waitStatus.ExitStatus()) - } - if waitStatus.Signaled() { - logrus.Debugf("%v", exitError) - exit(int(waitStatus.Signal()) + 128) - } - } - } - } - logrus.Errorf("%v", err) - logrus.Errorf("(Unable to determine exit status)") - exit(1) - } - exit(0) -} diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go deleted file mode 100644 index 818983474e..0000000000 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_gccgo.go +++ /dev/null @@ -1,25 +0,0 @@ -//go:build linux && cgo && gccgo - -package unshare - -// #cgo CFLAGS: -Wall -Wextra -// extern void _containers_unshare(void); -// static void __attribute__((constructor)) init(void) { -// _containers_unshare(); -// } -import "C" - -// This next bit is straight out of libcontainer. - -// AlwaysFalse is here to stay false -// (and be exported so the compiler doesn't optimize out its reference) -var AlwaysFalse bool - -func init() { - if AlwaysFalse { - // by referencing this C init() in a noop test, it will ensure the compiler - // links in the C function. - // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=65134 - C.init() - } -} diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go deleted file mode 100644 index 9e0e562d20..0000000000 --- a/vendor/github.com/containers/storage/pkg/unshare/unshare_linux.go +++ /dev/null @@ -1,755 +0,0 @@ -//go:build linux - -package unshare - -import ( - "bufio" - "bytes" - "errors" - "fmt" - "io" - "os" - "os/exec" - "os/signal" - "os/user" - "runtime" - "strconv" - "strings" - "sync" - "syscall" - - "github.com/containers/storage/pkg/idtools" - "github.com/containers/storage/pkg/reexec" - "github.com/moby/sys/capability" - "github.com/opencontainers/runtime-spec/specs-go" - "github.com/sirupsen/logrus" -) - -// Cmd wraps an exec.Cmd created by the reexec package in unshare(), and -// handles setting ID maps and other related settings by triggering -// initialization code in the child. -type Cmd struct { - *exec.Cmd - UnshareFlags int - UseNewuidmap bool - UidMappings []specs.LinuxIDMapping //nolint: revive - UseNewgidmap bool - GidMappings []specs.LinuxIDMapping //nolint: revive - GidMappingsEnableSetgroups bool - Setsid bool - Setpgrp bool - Ctty *os.File - OOMScoreAdj *int - Hook func(pid int) error -} - -// Command creates a new Cmd which can be customized. -func Command(args ...string) *Cmd { - cmd := reexec.Command(args...) - return &Cmd{ - Cmd: cmd, - } -} - -func getRootlessUID() int { - uidEnv := getenv("_CONTAINERS_ROOTLESS_UID") - if uidEnv != "" { - u, _ := strconv.Atoi(uidEnv) - return u - } - return os.Geteuid() -} - -func getRootlessGID() int { - gidEnv := getenv("_CONTAINERS_ROOTLESS_GID") - if gidEnv != "" { - u, _ := strconv.Atoi(gidEnv) - return u - } - - /* If the _CONTAINERS_ROOTLESS_UID is set, assume the gid==uid. 
*/ - uidEnv := os.Getenv("_CONTAINERS_ROOTLESS_UID") - if uidEnv != "" { - u, _ := strconv.Atoi(uidEnv) - return u - } - return os.Getegid() -} - -// IsSetID checks if specified path has correct FileMode (Setuid|SETGID) or the -// matching file capability -func IsSetID(path string, modeid os.FileMode, capid capability.Cap) (bool, error) { - info, err := os.Stat(path) - if err != nil { - return false, err - } - - mode := info.Mode() - if mode&modeid == modeid { - return true, nil - } - cap, err := capability.NewFile2(path) - if err != nil { - return false, err - } - if err := cap.Load(); err != nil { - return false, err - } - return cap.Get(capability.EFFECTIVE, capid), nil -} - -func (c *Cmd) Start() (retErr error) { - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - // Set an environment variable to tell the child to synchronize its startup. - if c.Env == nil { - c.Env = os.Environ() - } - c.Env = append(c.Env, fmt.Sprintf("_Containers-unshare=%d", c.UnshareFlags)) - - // Please the libpod "rootless" package to find the expected env variables. - if IsRootless() { - c.Env = append(c.Env, "_CONTAINERS_USERNS_CONFIGURED=done") - c.Env = append(c.Env, fmt.Sprintf("_CONTAINERS_ROOTLESS_UID=%d", getRootlessUID())) - c.Env = append(c.Env, fmt.Sprintf("_CONTAINERS_ROOTLESS_GID=%d", getRootlessGID())) - } - - // Create the pipe for reading the child's PID. - pidRead, pidWrite, err := os.Pipe() - if err != nil { - return fmt.Errorf("creating pid pipe: %w", err) - } - c.Env = append(c.Env, fmt.Sprintf("_Containers-pid-pipe=%d", len(c.ExtraFiles)+3)) - c.ExtraFiles = append(c.ExtraFiles, pidWrite) - - // Create the pipe for letting the child know to proceed. - continueRead, continueWrite, err := os.Pipe() - if err != nil { - pidRead.Close() - pidWrite.Close() - return fmt.Errorf("creating continue read/write pipe: %w", err) - } - c.Env = append(c.Env, fmt.Sprintf("_Containers-continue-pipe=%d", len(c.ExtraFiles)+3)) - c.ExtraFiles = append(c.ExtraFiles, continueRead) - - // Pass along other instructions. - if c.Setsid { - c.Env = append(c.Env, "_Containers-setsid=1") - } - if c.Setpgrp { - c.Env = append(c.Env, "_Containers-setpgrp=1") - } - if c.Ctty != nil { - c.Env = append(c.Env, fmt.Sprintf("_Containers-ctty=%d", len(c.ExtraFiles)+3)) - c.ExtraFiles = append(c.ExtraFiles, c.Ctty) - } - - // Make sure we clean up our pipes. - defer func() { - if pidRead != nil { - pidRead.Close() - } - if pidWrite != nil { - pidWrite.Close() - } - if continueRead != nil { - continueRead.Close() - } - if continueWrite != nil { - continueWrite.Close() - } - }() - - // Start the new process. - err = c.Cmd.Start() - if err != nil { - return err - } - - // If the function fails from here, we need to make sure the - // child process is killed and properly cleaned up. - defer func() { - if retErr != nil { - _ = c.Cmd.Process.Kill() - _ = c.Cmd.Wait() - } - }() - - // Close the ends of the pipes that the parent doesn't need. - continueRead.Close() - continueRead = nil - pidWrite.Close() - pidWrite = nil - - // Read the child's PID from the pipe. - b := new(bytes.Buffer) - if _, err := io.Copy(b, pidRead); err != nil { - return fmt.Errorf("reading child PID: %w", err) - } - pidString := b.String() - pid, err := strconv.Atoi(pidString) - if err != nil { - fmt.Fprintf(continueWrite, "error parsing PID %q: %v", pidString, err) - return fmt.Errorf("parsing PID %q: %w", pidString, err) - } - pidString = fmt.Sprintf("%d", pid) - - // If we created a new user namespace, set any specified mappings. 
- if c.UnshareFlags&syscall.CLONE_NEWUSER != 0 { - // Always set "setgroups". - setgroups, err := os.OpenFile(fmt.Sprintf("/proc/%s/setgroups", pidString), os.O_TRUNC|os.O_WRONLY, 0) - if err != nil { - fmt.Fprintf(continueWrite, "error opening setgroups: %v", err) - return fmt.Errorf("opening /proc/%s/setgroups: %w", pidString, err) - } - defer setgroups.Close() - if c.GidMappingsEnableSetgroups { - if _, err := fmt.Fprintf(setgroups, "allow"); err != nil { - fmt.Fprintf(continueWrite, "error writing \"allow\" to setgroups: %v", err) - return fmt.Errorf("opening \"allow\" to /proc/%s/setgroups: %w", pidString, err) - } - } else { - if _, err := fmt.Fprintf(setgroups, "deny"); err != nil { - fmt.Fprintf(continueWrite, "error writing \"deny\" to setgroups: %v", err) - return fmt.Errorf("writing \"deny\" to /proc/%s/setgroups: %w", pidString, err) - } - } - - if len(c.UidMappings) == 0 || len(c.GidMappings) == 0 { - uidmap, gidmap, err := GetHostIDMappings("") - if err != nil { - fmt.Fprintf(continueWrite, "error reading ID mappings in parent: %v", err) - return fmt.Errorf("reading ID mappings in parent: %w", err) - } - if len(c.UidMappings) == 0 { - c.UidMappings = uidmap - for i := range c.UidMappings { - c.UidMappings[i].HostID = c.UidMappings[i].ContainerID - } - } - if len(c.GidMappings) == 0 { - c.GidMappings = gidmap - for i := range c.GidMappings { - c.GidMappings[i].HostID = c.GidMappings[i].ContainerID - } - } - } - - if len(c.GidMappings) > 0 { - // Build the GID map, since writing to the proc file has to be done all at once. - g := new(bytes.Buffer) - for _, m := range c.GidMappings { - fmt.Fprintf(g, "%d %d %d\n", m.ContainerID, m.HostID, m.Size) - } - gidmapSet := false - // Set the GID map. - if c.UseNewgidmap { - path, err := exec.LookPath("newgidmap") - if err != nil { - return fmt.Errorf("finding newgidmap: %w", err) - } - cmd := exec.Command(path, append([]string{pidString}, strings.Fields(g.String())...)...) 
- g.Reset() - cmd.Stdout = g - cmd.Stderr = g - if err := cmd.Run(); err == nil { - gidmapSet = true - } else { - logrus.Warnf("running newgidmap: %v: %s", err, g.String()) - isSetgid, err := IsSetID(path, os.ModeSetgid, capability.CAP_SETGID) - if err != nil { - logrus.Warnf("Failed to check for setgid on %s: %v", path, err) - } else { - if !isSetgid { - logrus.Warnf("%s should be setgid or have filecaps setgid", path) - } - } - logrus.Warnf("Falling back to single mapping") - g.Reset() - fmt.Fprintf(g, "0 %d 1\n", os.Getegid()) - } - } - if !gidmapSet { - if c.UseNewgidmap { - setgroups, err := os.OpenFile(fmt.Sprintf("/proc/%s/setgroups", pidString), os.O_TRUNC|os.O_WRONLY, 0) - if err != nil { - fmt.Fprintf(continueWrite, "error opening /proc/%s/setgroups: %v", pidString, err) - return fmt.Errorf("opening /proc/%s/setgroups: %w", pidString, err) - } - defer setgroups.Close() - if _, err := fmt.Fprintf(setgroups, "deny"); err != nil { - fmt.Fprintf(continueWrite, "error writing 'deny' to /proc/%s/setgroups: %v", pidString, err) - return fmt.Errorf("writing 'deny' to /proc/%s/setgroups: %w", pidString, err) - } - } - gidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/gid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0) - if err != nil { - fmt.Fprintf(continueWrite, "opening /proc/%s/gid_map: %v", pidString, err) - return fmt.Errorf("opening /proc/%s/gid_map: %w", pidString, err) - } - defer gidmap.Close() - if _, err := fmt.Fprintf(gidmap, "%s", g.String()); err != nil { - fmt.Fprintf(continueWrite, "writing %q to /proc/%s/gid_map: %v", g.String(), pidString, err) - return fmt.Errorf("writing %q to /proc/%s/gid_map: %w", g.String(), pidString, err) - } - } - } - - if len(c.UidMappings) > 0 { - // Build the UID map, since writing to the proc file has to be done all at once. - u := new(bytes.Buffer) - for _, m := range c.UidMappings { - fmt.Fprintf(u, "%d %d %d\n", m.ContainerID, m.HostID, m.Size) - } - uidmapSet := false - // Set the UID map. - if c.UseNewuidmap { - path, err := exec.LookPath("newuidmap") - if err != nil { - return fmt.Errorf("finding newuidmap: %w", err) - } - cmd := exec.Command(path, append([]string{pidString}, strings.Fields(u.String())...)...) 
- u.Reset() - cmd.Stdout = u - cmd.Stderr = u - if err := cmd.Run(); err == nil { - uidmapSet = true - } else { - logrus.Warnf("Error running newuidmap: %v: %s", err, u.String()) - isSetuid, err := IsSetID(path, os.ModeSetuid, capability.CAP_SETUID) - if err != nil { - logrus.Warnf("Failed to check for setuid on %s: %v", path, err) - } else { - if !isSetuid { - logrus.Warnf("%s should be setuid or have filecaps setuid", path) - } - } - - logrus.Warnf("Falling back to single mapping") - u.Reset() - fmt.Fprintf(u, "0 %d 1\n", os.Geteuid()) - } - } - if !uidmapSet { - uidmap, err := os.OpenFile(fmt.Sprintf("/proc/%s/uid_map", pidString), os.O_TRUNC|os.O_WRONLY, 0) - if err != nil { - fmt.Fprintf(continueWrite, "error opening /proc/%s/uid_map: %v", pidString, err) - return fmt.Errorf("opening /proc/%s/uid_map: %w", pidString, err) - } - defer uidmap.Close() - if _, err := fmt.Fprintf(uidmap, "%s", u.String()); err != nil { - fmt.Fprintf(continueWrite, "error writing %q to /proc/%s/uid_map: %v", u.String(), pidString, err) - return fmt.Errorf("writing %q to /proc/%s/uid_map: %w", u.String(), pidString, err) - } - } - } - } - - if c.OOMScoreAdj != nil { - oomScoreAdj, err := os.OpenFile(fmt.Sprintf("/proc/%s/oom_score_adj", pidString), os.O_TRUNC|os.O_WRONLY, 0) - if err != nil { - fmt.Fprintf(continueWrite, "error opening oom_score_adj: %v", err) - return fmt.Errorf("opening /proc/%s/oom_score_adj: %w", pidString, err) - } - defer oomScoreAdj.Close() - if _, err := fmt.Fprintf(oomScoreAdj, "%d\n", *c.OOMScoreAdj); err != nil { - fmt.Fprintf(continueWrite, "error writing \"%d\" to oom_score_adj: %v", c.OOMScoreAdj, err) - return fmt.Errorf("writing \"%d\" to /proc/%s/oom_score_adj: %w", c.OOMScoreAdj, pidString, err) - } - } - // Run any additional setup that we want to do before the child starts running proper. - if c.Hook != nil { - if err = c.Hook(pid); err != nil { - fmt.Fprintf(continueWrite, "hook error: %v", err) - return err - } - } - - return nil -} - -func (c *Cmd) Run() error { - if err := c.Start(); err != nil { - return err - } - return c.Wait() -} - -func (c *Cmd) CombinedOutput() ([]byte, error) { - return nil, errors.New("unshare: CombinedOutput() not implemented") -} - -func (c *Cmd) Output() ([]byte, error) { - return nil, errors.New("unshare: Output() not implemented") -} - -var ( - isRootlessOnce sync.Once - isRootless bool -) - -const ( - // UsernsEnvName is the environment variable, if set indicates in rootless mode - UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED" -) - -// hasFullUsersMappings checks whether the current user namespace has all the IDs mapped. -func hasFullUsersMappings() (bool, error) { - content, err := os.ReadFile("/proc/self/uid_map") - if err != nil { - return false, err - } - // The kernel rejects attempts to create mappings where either starting - // point is (u32)-1: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/kernel/user_namespace.c?id=af3e9579ecfb#n1006 . - // So, if the uid_map contains 4294967295, the entire IDs space is available in the - // user namespace, so it is likely the initial user namespace. 
- return bytes.Contains(content, []byte("4294967295")), nil -} - -var ( - hasCapSysAdminOnce sync.Once - hasCapSysAdminRet bool - hasCapSysAdminErr error -) - -// IsRootless tells us if we are running in rootless mode -func IsRootless() bool { - isRootlessOnce.Do(func() { - isRootless = getRootlessUID() != 0 || getenv(UsernsEnvName) != "" - if !isRootless { - hasCapSysAdmin, err := HasCapSysAdmin() - if err != nil { - logrus.Warnf("Failed to read CAP_SYS_ADMIN presence for the current process") - } - if err == nil && !hasCapSysAdmin { - isRootless = true - } - } - if !isRootless { - hasMappings, err := hasFullUsersMappings() - if err != nil { - logrus.Warnf("Failed to read current user namespace mappings") - } - if err == nil && !hasMappings { - isRootless = true - } - } - }) - return isRootless -} - -// GetRootlessUID returns the UID of the user in the parent userNS -func GetRootlessUID() int { - uidEnv := getenv("_CONTAINERS_ROOTLESS_UID") - if uidEnv != "" { - u, _ := strconv.Atoi(uidEnv) - return u - } - return os.Getuid() -} - -// GetRootlessGID returns the GID of the user in the parent userNS -func GetRootlessGID() int { - gidEnv := getenv("_CONTAINERS_ROOTLESS_GID") - if gidEnv != "" { - u, _ := strconv.Atoi(gidEnv) - return u - } - return os.Getgid() -} - -// RootlessEnv returns the environment settings for the rootless containers -func RootlessEnv() []string { - return append(os.Environ(), UsernsEnvName+"=done") -} - -type Runnable interface { - Run() error -} - -func bailOnError(err error, format string, a ...any) { //nolint:revive,goprintffuncname - if err != nil { - if format != "" { - logrus.Errorf("%s: %v", fmt.Sprintf(format, a...), err) - } else { - logrus.Errorf("%v", err) - } - os.Exit(1) - } -} - -// MaybeReexecUsingUserNamespace re-exec the process in a new namespace -func MaybeReexecUsingUserNamespace(evenForRoot bool) { - // If we've already been through this once, no need to try again. - if os.Geteuid() == 0 && GetRootlessUID() > 0 { - return - } - - var uidNum, gidNum uint64 - // Figure out who we are. - me, err := user.Current() - if !os.IsNotExist(err) { - bailOnError(err, "error determining current user") - uidNum, err = strconv.ParseUint(me.Uid, 10, 32) - bailOnError(err, "error parsing current UID %s", me.Uid) - gidNum, err = strconv.ParseUint(me.Gid, 10, 32) - bailOnError(err, "error parsing current GID %s", me.Gid) - } - - runtime.LockOSThread() - defer runtime.UnlockOSThread() - - // ID mappings to use to reexec ourselves. - var uidmap, gidmap []specs.LinuxIDMapping - if uidNum != 0 || evenForRoot { - // Read the set of ID mappings that we're allowed to use. Each - // range in /etc/subuid and /etc/subgid file is a starting host - // ID and a range size. - uidmap, gidmap, err = GetSubIDMappings(me.Username, me.Username) - if err != nil { - logrus.Warnf("Reading allowed ID mappings: %v", err) - } - if len(uidmap) == 0 { - logrus.Warnf("Found no UID ranges set aside for user %q in /etc/subuid.", me.Username) - } - if len(gidmap) == 0 { - logrus.Warnf("Found no GID ranges set aside for user %q in /etc/subgid.", me.Username) - } - // Map our UID and GID, then the subuid and subgid ranges, - // consecutively, starting at 0, to get the mappings to use for - // a copy of ourselves. - uidmap = append([]specs.LinuxIDMapping{{HostID: uint32(uidNum), ContainerID: 0, Size: 1}}, uidmap...) - gidmap = append([]specs.LinuxIDMapping{{HostID: uint32(gidNum), ContainerID: 0, Size: 1}}, gidmap...) 
- var rangeStart uint32 - for i := range uidmap { - uidmap[i].ContainerID = rangeStart - rangeStart += uidmap[i].Size - } - rangeStart = 0 - for i := range gidmap { - gidmap[i].ContainerID = rangeStart - rangeStart += gidmap[i].Size - } - } else { - // If we have CAP_SYS_ADMIN, then we don't need to create a new namespace in order to be able - // to use unshare(), so don't bother creating a new user namespace at this point. - capabilities, err := capability.NewPid2(0) - bailOnError(err, "Initializing a new Capabilities object of pid 0") - err = capabilities.Load() - bailOnError(err, "Reading the current capabilities sets") - - if capabilities.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN) { - return - } - // Read the set of ID mappings that we're currently using. - uidmap, gidmap, err = GetHostIDMappings("") - bailOnError(err, "Reading current ID mappings") - // Just reuse them. - for i := range uidmap { - uidmap[i].HostID = uidmap[i].ContainerID - } - for i := range gidmap { - gidmap[i].HostID = gidmap[i].ContainerID - } - } - - // Unlike most uses of reexec or unshare, we're using a name that - // _won't_ be recognized as a registered reexec handler, since we - // _want_ to fall through reexec.Init() to the normal main(). - cmd := Command(append([]string{fmt.Sprintf("%s-in-a-user-namespace", os.Args[0])}, os.Args[1:]...)...) - - // If, somehow, we don't become UID 0 in our child, indicate that the child shouldn't try again. - err = os.Setenv(UsernsEnvName, "1") - bailOnError(err, "error setting %s=1 in environment", UsernsEnvName) - - // Set the default isolation type to use the "rootless" method. - if _, present := os.LookupEnv("BUILDAH_ISOLATION"); !present { - if err = os.Setenv("BUILDAH_ISOLATION", "rootless"); err != nil { - if err := os.Setenv("BUILDAH_ISOLATION", "rootless"); err != nil { - logrus.Errorf("Setting BUILDAH_ISOLATION=rootless in environment: %v", err) - os.Exit(1) - } - } - } - - // Reuse our stdio. - cmd.Stdin = os.Stdin - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - - // Set up a new user namespace with the ID mapping. - cmd.UnshareFlags = syscall.CLONE_NEWUSER | syscall.CLONE_NEWNS - cmd.UseNewuidmap = uidNum != 0 - cmd.UidMappings = uidmap - cmd.UseNewgidmap = uidNum != 0 - cmd.GidMappings = gidmap - cmd.GidMappingsEnableSetgroups = true - - // Finish up. - logrus.Debugf("Running %+v with environment %+v, UID map %+v, and GID map %+v", cmd.Cmd.Args, os.Environ(), cmd.UidMappings, cmd.GidMappings) - - // Forward SIGHUP, SIGINT, and SIGTERM to our child process. - interrupted := make(chan os.Signal, 100) - defer func() { - signal.Stop(interrupted) - close(interrupted) - }() - cmd.Hook = func(int) error { - go func() { - for receivedSignal := range interrupted { - if err := cmd.Cmd.Process.Signal(receivedSignal); err != nil { - logrus.Warnf( - "Failed to send a signal '%d' to the Process (PID: %d): %v", - receivedSignal, cmd.Cmd.Process.Pid, err, - ) - } - } - }() - return nil - } - signal.Notify(interrupted, syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM) - - // Make sure our child process gets SIGKILLed if we exit, for whatever - // reason, before it does. - if cmd.Cmd.SysProcAttr == nil { - cmd.Cmd.SysProcAttr = &syscall.SysProcAttr{} - } - cmd.Cmd.SysProcAttr.Pdeathsig = syscall.SIGKILL - - ExecRunnable(cmd, nil) -} - -// ExecRunnable runs the specified unshare command, captures its exit status, -// and exits with the same status. 
-func ExecRunnable(cmd Runnable, cleanup func()) { - exit := func(status int) { - if cleanup != nil { - cleanup() - } - os.Exit(status) - } - if err := cmd.Run(); err != nil { - if exitError, ok := err.(*exec.ExitError); ok { - if exitError.ProcessState.Exited() { - if waitStatus, ok := exitError.ProcessState.Sys().(syscall.WaitStatus); ok { - if waitStatus.Exited() { - logrus.Debugf("%v", exitError) - exit(waitStatus.ExitStatus()) - } - if waitStatus.Signaled() { - logrus.Debugf("%v", exitError) - exit(int(waitStatus.Signal()) + 128) - } - } - } - } - logrus.Errorf("%v", err) - logrus.Errorf("(Unable to determine exit status)") - exit(1) - } - exit(0) -} - -// getHostIDMappings reads mappings from the named node under /proc. -func getHostIDMappings(path string) ([]specs.LinuxIDMapping, error) { - var mappings []specs.LinuxIDMapping - f, err := os.Open(path) - if err != nil { - return nil, fmt.Errorf("reading ID mappings from %q: %w", path, err) - } - defer f.Close() - scanner := bufio.NewScanner(f) - for scanner.Scan() { - line := scanner.Text() - fields := strings.Fields(line) - if len(fields) != 3 { - return nil, fmt.Errorf("line %q from %q has %d fields, not 3", line, path, len(fields)) - } - cid, err := strconv.ParseUint(fields[0], 10, 32) - if err != nil { - return nil, fmt.Errorf("parsing container ID value %q from line %q in %q: %w", fields[0], line, path, err) - } - hid, err := strconv.ParseUint(fields[1], 10, 32) - if err != nil { - return nil, fmt.Errorf("parsing host ID value %q from line %q in %q: %w", fields[1], line, path, err) - } - size, err := strconv.ParseUint(fields[2], 10, 32) - if err != nil { - return nil, fmt.Errorf("parsing size value %q from line %q in %q: %w", fields[2], line, path, err) - } - mappings = append(mappings, specs.LinuxIDMapping{ContainerID: uint32(cid), HostID: uint32(hid), Size: uint32(size)}) - } - return mappings, nil -} - -// GetHostIDMappings reads mappings for the specified process (or the current -// process if pid is "self" or an empty string) from the kernel. -func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) { - if pid == "" { - pid = "self" - } - uidmap, err := getHostIDMappings(fmt.Sprintf("/proc/%s/uid_map", pid)) - if err != nil { - return nil, nil, err - } - gidmap, err := getHostIDMappings(fmt.Sprintf("/proc/%s/gid_map", pid)) - if err != nil { - return nil, nil, err - } - return uidmap, gidmap, nil -} - -// GetSubIDMappings reads mappings from /etc/subuid and /etc/subgid. -func GetSubIDMappings(user, group string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) { - mappings, err := idtools.NewIDMappings(user, group) - if err != nil { - return nil, nil, fmt.Errorf("reading subuid mappings for user %q and subgid mappings for group %q: %w", user, group, err) - } - var uidmap, gidmap []specs.LinuxIDMapping - for _, m := range mappings.UIDs() { - uidmap = append(uidmap, specs.LinuxIDMapping{ - ContainerID: uint32(m.ContainerID), - HostID: uint32(m.HostID), - Size: uint32(m.Size), - }) - } - for _, m := range mappings.GIDs() { - gidmap = append(gidmap, specs.LinuxIDMapping{ - ContainerID: uint32(m.ContainerID), - HostID: uint32(m.HostID), - Size: uint32(m.Size), - }) - } - return uidmap, gidmap, nil -} - -// ParseIDMappings parses mapping triples. 
-// ParseIDMappings parses mapping triples.
-func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) {
-	uid, err := idtools.ParseIDMap(uidmap, "userns-uid-map")
-	if err != nil {
-		return nil, nil, err
-	}
-	gid, err := idtools.ParseIDMap(gidmap, "userns-gid-map")
-	if err != nil {
-		return nil, nil, err
-	}
-	return uid, gid, nil
-}
-
-// HasCapSysAdmin returns whether the current process has CAP_SYS_ADMIN.
-func HasCapSysAdmin() (bool, error) {
-	hasCapSysAdminOnce.Do(func() {
-		currentCaps, err := capability.NewPid2(0)
-		if err != nil {
-			hasCapSysAdminErr = err
-			return
-		}
-		if err = currentCaps.Load(); err != nil {
-			hasCapSysAdminErr = err
-			return
-		}
-		hasCapSysAdminRet = currentCaps.Get(capability.EFFECTIVE, capability.CAP_SYS_ADMIN)
-	})
-	return hasCapSysAdminRet, hasCapSysAdminErr
-}
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go
deleted file mode 100644
index 05706b8fe6..0000000000
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported.go
+++ /dev/null
@@ -1,55 +0,0 @@
-//go:build !linux && !darwin
-
-package unshare
-
-import (
-	"os"
-
-	"github.com/containers/storage/pkg/idtools"
-	"github.com/opencontainers/runtime-spec/specs-go"
-)
-
-const (
-	// UsernsEnvName is the environment variable, if set indicates in rootless mode
-	UsernsEnvName = "_CONTAINERS_USERNS_CONFIGURED"
-)
-
-// IsRootless tells us if we are running in rootless mode
-func IsRootless() bool {
-	return os.Getuid() != 0
-}
-
-// GetRootlessUID returns the UID of the user in the parent userNS
-func GetRootlessUID() int {
-	return os.Getuid()
-}
-
-// GetRootlessGID returns the GID of the user in the parent userNS
-func GetRootlessGID() int {
-	return os.Getgid()
-}
-
-// RootlessEnv returns the environment settings for the rootless containers
-func RootlessEnv() []string {
-	return append(os.Environ(), UsernsEnvName+"=")
-}
-
-// MaybeReexecUsingUserNamespace re-exec the process in a new namespace
-func MaybeReexecUsingUserNamespace(evenForRoot bool) {
-}
-
-// GetHostIDMappings reads mappings for the specified process (or the current
-// process if pid is "self" or an empty string) from the kernel.
-func GetHostIDMappings(pid string) ([]specs.LinuxIDMapping, []specs.LinuxIDMapping, error) {
-	return nil, nil, nil
-}
-
-// ParseIDMappings parses mapping triples.
-func ParseIDMappings(uidmap, gidmap []string) ([]idtools.IDMap, []idtools.IDMap, error) {
-	return nil, nil, nil
-}
-
-// HasCapSysAdmin returns whether the current process has CAP_SYS_ADMIN.
-func HasCapSysAdmin() (bool, error) {
-	return os.Geteuid() == 0, nil
-}
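The //go:build split above is what keeps callers portable: on Linux, HasCapSysAdmin() inspects the effective capability set (cached with sync.Once), while the non-Linux stubs reduce it to a euid check and make MaybeReexecUsingUserNamespace() a no-op. A sketch of the guard a caller might write against either implementation (assumed usage, not code from this repository):

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/containers/storage/pkg/unshare"
    )

    func main() {
    	ok, err := unshare.HasCapSysAdmin()
    	if err != nil {
    		log.Fatal(err)
    	}
    	if !ok {
    		// Without CAP_SYS_ADMIN, mount and unshare operations need a
    		// user namespace, hence the re-exec path in unshare.go above.
    		fmt.Println("running without CAP_SYS_ADMIN")
    	}
    }
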
diff --git a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go b/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go
deleted file mode 100644
index ae2869d74b..0000000000
--- a/vendor/github.com/containers/storage/pkg/unshare/unshare_unsupported_cgo.go
+++ /dev/null
@@ -1,10 +0,0 @@
-//go:build cgo && !(linux || freebsd)
-
-package unshare
-
-// Go refuses to compile a subpackage with CGO_ENABLED=1 if there is a *.c file but no 'import "C"'.
-// OTOH if we did have an 'import "C"', the Linux-only code would fail to compile.
-// So, satisfy the Go compiler by using import "C" but #ifdef-ing out all of the code.
-
-// #cgo CPPFLAGS: -DUNSHARE_NO_CODE_AT_ALL
-import "C"
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 1ca73fcff2..b4eeeecbd0 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -138,62 +138,12 @@ github.com/containerd/ttrpc
 # github.com/containerd/typeurl/v2 v2.2.3
 ## explicit; go 1.21
 github.com/containerd/typeurl/v2
-# github.com/containers/image/v5 v5.36.2
-## explicit; go 1.23.3
-github.com/containers/image/v5/docker
-github.com/containers/image/v5/docker/policyconfiguration
-github.com/containers/image/v5/docker/reference
-github.com/containers/image/v5/internal/blobinfocache
-github.com/containers/image/v5/internal/image
-github.com/containers/image/v5/internal/imagedestination/impl
-github.com/containers/image/v5/internal/imagedestination/stubs
-github.com/containers/image/v5/internal/imagesource
-github.com/containers/image/v5/internal/imagesource/impl
-github.com/containers/image/v5/internal/imagesource/stubs
-github.com/containers/image/v5/internal/iolimits
-github.com/containers/image/v5/internal/manifest
-github.com/containers/image/v5/internal/multierr
-github.com/containers/image/v5/internal/pkg/platform
-github.com/containers/image/v5/internal/private
-github.com/containers/image/v5/internal/putblobdigest
-github.com/containers/image/v5/internal/rootless
-github.com/containers/image/v5/internal/set
-github.com/containers/image/v5/internal/signature
-github.com/containers/image/v5/internal/streamdigest
-github.com/containers/image/v5/internal/tmpdir
-github.com/containers/image/v5/internal/uploadreader
-github.com/containers/image/v5/internal/useragent
-github.com/containers/image/v5/manifest
-github.com/containers/image/v5/pkg/blobinfocache/none
-github.com/containers/image/v5/pkg/compression/internal
-github.com/containers/image/v5/pkg/compression/types
-github.com/containers/image/v5/pkg/docker/config
-github.com/containers/image/v5/pkg/strslice
-github.com/containers/image/v5/pkg/sysregistriesv2
-github.com/containers/image/v5/pkg/tlsclientconfig
-github.com/containers/image/v5/transports
-github.com/containers/image/v5/types
-github.com/containers/image/v5/version
 # github.com/containers/libtrust v0.0.0-20230121012942-c1716e8a8d01
 ## explicit
 github.com/containers/libtrust
 # github.com/containers/ocicrypt v1.2.1
 ## explicit; go 1.22
 github.com/containers/ocicrypt/spec
-# github.com/containers/storage v1.59.1
-## explicit; go 1.23.0
-github.com/containers/storage/internal/rawfilelock
-github.com/containers/storage/pkg/fileutils
-github.com/containers/storage/pkg/homedir
-github.com/containers/storage/pkg/idtools
-github.com/containers/storage/pkg/ioutils
-github.com/containers/storage/pkg/lockfile
-github.com/containers/storage/pkg/longpath
-github.com/containers/storage/pkg/mount
-github.com/containers/storage/pkg/reexec
-github.com/containers/storage/pkg/regexp
-github.com/containers/storage/pkg/system
-github.com/containers/storage/pkg/unshare
 # github.com/coreos/go-semver v0.3.1
 ## explicit; go 1.8
 github.com/coreos/go-semver/semver