From b7179d1cac50ab1c6c492d5bc5cfc8110d1b5d8a Mon Sep 17 00:00:00 2001 From: Fabio Bertinatto Date: Fri, 28 Nov 2025 15:05:22 -0300 Subject: [PATCH 1/8] Add multi-operator-manager dependency Generated with Claude Code --- go.mod | 6 + go.sum | 12 + vendor/github.com/PaesslerAG/gval/.gitignore | 30 + vendor/github.com/PaesslerAG/gval/.travis.yml | 12 + vendor/github.com/PaesslerAG/gval/LICENSE | 12 + vendor/github.com/PaesslerAG/gval/README.md | 163 + .../github.com/PaesslerAG/gval/evaluable.go | 363 ++ .../github.com/PaesslerAG/gval/functions.go | 128 + vendor/github.com/PaesslerAG/gval/gval.go | 336 + vendor/github.com/PaesslerAG/gval/language.go | 281 + vendor/github.com/PaesslerAG/gval/operator.go | 405 ++ vendor/github.com/PaesslerAG/gval/parse.go | 347 + vendor/github.com/PaesslerAG/gval/parser.go | 147 + .../PaesslerAG/gval/prtg-batmin-gopher.png | Bin 0 -> 34696 bytes .../github.com/PaesslerAG/jsonpath/.gitignore | 30 + .../PaesslerAG/jsonpath/.travis.yml | 6 + vendor/github.com/PaesslerAG/jsonpath/LICENSE | 12 + .../github.com/PaesslerAG/jsonpath/README.md | 11 + .../PaesslerAG/jsonpath/jsonpath.go | 54 + .../github.com/PaesslerAG/jsonpath/parse.go | 204 + vendor/github.com/PaesslerAG/jsonpath/path.go | 103 + .../PaesslerAG/jsonpath/placeholder.go | 181 + .../PaesslerAG/jsonpath/selector.go | 203 + vendor/github.com/PaesslerAG/jsonpath/test.sh | 15 + .../library-go/pkg/manifestclient/context.go | 18 + .../default-discovery/README.md | 2 + .../aggregated-discovery-api.yaml | 493 ++ .../aggregated-discovery-apis.yaml | 5720 +++++++++++++++++ .../pkg/manifestclient/discovery_reader.go | 150 + .../library-go/pkg/manifestclient/encoding.go | 90 + .../library-go/pkg/manifestclient/get.go | 123 + .../library-go/pkg/manifestclient/io.go | 37 + .../library-go/pkg/manifestclient/list.go | 307 + .../mutation_directory_reader.go | 170 + .../mutation_directory_writer.go | 47 + .../pkg/manifestclient/mutation_tracker.go | 164 + .../pkg/manifestclient/read_roundtripper.go 
| 158 + .../manifestclient/readwrite_roundtripper.go | 118 + .../pkg/manifestclient/serialized_request.go | 371 ++ .../pkg/manifestclient/write_roundtripper.go | 276 + .../openshift/multi-operator-manager/LICENSE | 201 + .../library/libraryinputresources/command.go | 78 + .../libraryinputresources/easy_creation.go | 73 + .../library/libraryinputresources/options.go | 86 + .../libraryinputresources/prune_mustgather.go | 310 + .../library/libraryinputresources/resource.go | 273 + .../library/libraryinputresources/types.go | 110 + .../libraryinputresources/validation.go | 103 + .../library/libraryoutputresources/command.go | 74 + .../libraryoutputresources/easy_creation.go | 93 + .../library/libraryoutputresources/options.go | 40 + .../library/libraryoutputresources/types.go | 59 + .../github.com/shopspring/decimal/.gitignore | 9 + .../github.com/shopspring/decimal/.travis.yml | 19 + .../shopspring/decimal/CHANGELOG.md | 49 + vendor/github.com/shopspring/decimal/LICENSE | 45 + .../github.com/shopspring/decimal/README.md | 130 + .../shopspring/decimal/decimal-go.go | 415 ++ .../github.com/shopspring/decimal/decimal.go | 1904 ++++++ .../github.com/shopspring/decimal/rounding.go | 160 + vendor/k8s.io/cli-runtime/LICENSE | 202 + .../pkg/genericiooptions/io_options.go | 56 + vendor/modules.txt | 17 + 63 files changed, 15811 insertions(+) create mode 100644 vendor/github.com/PaesslerAG/gval/.gitignore create mode 100644 vendor/github.com/PaesslerAG/gval/.travis.yml create mode 100644 vendor/github.com/PaesslerAG/gval/LICENSE create mode 100644 vendor/github.com/PaesslerAG/gval/README.md create mode 100644 vendor/github.com/PaesslerAG/gval/evaluable.go create mode 100644 vendor/github.com/PaesslerAG/gval/functions.go create mode 100644 vendor/github.com/PaesslerAG/gval/gval.go create mode 100644 vendor/github.com/PaesslerAG/gval/language.go create mode 100644 vendor/github.com/PaesslerAG/gval/operator.go create mode 100644 vendor/github.com/PaesslerAG/gval/parse.go create 
mode 100644 vendor/github.com/PaesslerAG/gval/parser.go create mode 100644 vendor/github.com/PaesslerAG/gval/prtg-batmin-gopher.png create mode 100644 vendor/github.com/PaesslerAG/jsonpath/.gitignore create mode 100644 vendor/github.com/PaesslerAG/jsonpath/.travis.yml create mode 100644 vendor/github.com/PaesslerAG/jsonpath/LICENSE create mode 100644 vendor/github.com/PaesslerAG/jsonpath/README.md create mode 100644 vendor/github.com/PaesslerAG/jsonpath/jsonpath.go create mode 100644 vendor/github.com/PaesslerAG/jsonpath/parse.go create mode 100644 vendor/github.com/PaesslerAG/jsonpath/path.go create mode 100644 vendor/github.com/PaesslerAG/jsonpath/placeholder.go create mode 100644 vendor/github.com/PaesslerAG/jsonpath/selector.go create mode 100644 vendor/github.com/PaesslerAG/jsonpath/test.sh create mode 100644 vendor/github.com/openshift/library-go/pkg/manifestclient/context.go create mode 100644 vendor/github.com/openshift/library-go/pkg/manifestclient/default-discovery/README.md create mode 100644 vendor/github.com/openshift/library-go/pkg/manifestclient/default-discovery/aggregated-discovery-api.yaml create mode 100644 vendor/github.com/openshift/library-go/pkg/manifestclient/default-discovery/aggregated-discovery-apis.yaml create mode 100644 vendor/github.com/openshift/library-go/pkg/manifestclient/discovery_reader.go create mode 100644 vendor/github.com/openshift/library-go/pkg/manifestclient/encoding.go create mode 100644 vendor/github.com/openshift/library-go/pkg/manifestclient/get.go create mode 100644 vendor/github.com/openshift/library-go/pkg/manifestclient/io.go create mode 100644 vendor/github.com/openshift/library-go/pkg/manifestclient/list.go create mode 100644 vendor/github.com/openshift/library-go/pkg/manifestclient/mutation_directory_reader.go create mode 100644 vendor/github.com/openshift/library-go/pkg/manifestclient/mutation_directory_writer.go create mode 100644 vendor/github.com/openshift/library-go/pkg/manifestclient/mutation_tracker.go 
create mode 100644 vendor/github.com/openshift/library-go/pkg/manifestclient/read_roundtripper.go create mode 100644 vendor/github.com/openshift/library-go/pkg/manifestclient/readwrite_roundtripper.go create mode 100644 vendor/github.com/openshift/library-go/pkg/manifestclient/serialized_request.go create mode 100644 vendor/github.com/openshift/library-go/pkg/manifestclient/write_roundtripper.go create mode 100644 vendor/github.com/openshift/multi-operator-manager/LICENSE create mode 100644 vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryinputresources/command.go create mode 100644 vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryinputresources/easy_creation.go create mode 100644 vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryinputresources/options.go create mode 100644 vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryinputresources/prune_mustgather.go create mode 100644 vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryinputresources/resource.go create mode 100644 vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryinputresources/types.go create mode 100644 vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryinputresources/validation.go create mode 100644 vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryoutputresources/command.go create mode 100644 vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryoutputresources/easy_creation.go create mode 100644 vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryoutputresources/options.go create mode 100644 vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryoutputresources/types.go create mode 100644 vendor/github.com/shopspring/decimal/.gitignore create mode 100644 vendor/github.com/shopspring/decimal/.travis.yml create mode 100644 vendor/github.com/shopspring/decimal/CHANGELOG.md create mode 100644 
vendor/github.com/shopspring/decimal/LICENSE create mode 100644 vendor/github.com/shopspring/decimal/README.md create mode 100644 vendor/github.com/shopspring/decimal/decimal-go.go create mode 100644 vendor/github.com/shopspring/decimal/decimal.go create mode 100644 vendor/github.com/shopspring/decimal/rounding.go create mode 100644 vendor/k8s.io/cli-runtime/LICENSE create mode 100644 vendor/k8s.io/cli-runtime/pkg/genericiooptions/io_options.go diff --git a/go.mod b/go.mod index 6a0ec258a..bbd16822c 100644 --- a/go.mod +++ b/go.mod @@ -11,6 +11,7 @@ require ( github.com/openshift/build-machinery-go v0.0.0-20250530140348-dc5b2804eeee github.com/openshift/client-go v0.0.0-20251015124057-db0dee36e235 github.com/openshift/library-go v0.0.0-20251104164011-e9c2485b059c + github.com/openshift/multi-operator-manager v0.0.0-20250930141021-05cb0b9abdb4 github.com/prometheus/client_golang v1.22.0 github.com/prometheus/common v0.62.0 github.com/spf13/cobra v1.9.1 @@ -26,9 +27,13 @@ require ( k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 ) +require k8s.io/cli-runtime v0.30.2 + require ( cel.dev/expr v0.24.0 // indirect github.com/NYTimes/gziphandler v1.1.1 // indirect + github.com/PaesslerAG/gval v1.2.3 // indirect + github.com/PaesslerAG/jsonpath v0.1.1 // indirect github.com/antlr4-go/antlr/v4 v4.13.0 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect @@ -76,6 +81,7 @@ require ( github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/robfig/cron v1.2.0 // indirect + github.com/shopspring/decimal v1.3.1 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect github.com/x448/float16 v0.8.4 // indirect diff --git a/go.sum b/go.sum index 9d03faa8a..98c64fea2 100644 --- a/go.sum +++ b/go.sum @@ -2,6 +2,12 @@ cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= cel.dev/expr v0.24.0/go.mod 
h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= +github.com/PaesslerAG/gval v1.0.0/go.mod h1:y/nm5yEyTeX6av0OfKJNp9rBNj2XrGhAf5+v24IBN1I= +github.com/PaesslerAG/gval v1.2.3 h1:Z3B/zLyWvqxjUtkIOEkFauqLnQn8Q37F1Q+uAjLXgMw= +github.com/PaesslerAG/gval v1.2.3/go.mod h1:XRFLwvmkTEdYziLdaCeCa5ImcGVrfQbeNUbVR+C6xac= +github.com/PaesslerAG/jsonpath v0.1.0/go.mod h1:4BzmtoM/PI8fPO4aQGIusjGxGir2BzcV0grWtFzq1Y8= +github.com/PaesslerAG/jsonpath v0.1.1 h1:c1/AToHQMVsduPAa4Vh6xp2U0evy4t8SWp8imEsylIk= +github.com/PaesslerAG/jsonpath v0.1.1/go.mod h1:lVboNxFGal/VwW6d9JzIy56bUsYAP6tH/x80vjnCseY= github.com/antlr4-go/antlr/v4 v4.13.0 h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -167,6 +173,8 @@ github.com/openshift/client-go v0.0.0-20251015124057-db0dee36e235 h1:9JBeIXmnHlp github.com/openshift/client-go v0.0.0-20251015124057-db0dee36e235/go.mod h1:L49W6pfrZkfOE5iC1PqEkuLkXG4W0BX4w8b+L2Bv7fM= github.com/openshift/library-go v0.0.0-20251104164011-e9c2485b059c h1:fCvbOJjMSbJaDK53vBo2nCL0xpvqO2zuvFyJxI0HTgM= github.com/openshift/library-go v0.0.0-20251104164011-e9c2485b059c/go.mod h1:OlFFws1AO51uzfc48MsStGE4SFMWlMZD0+f5a/zCtKI= +github.com/openshift/multi-operator-manager v0.0.0-20250930141021-05cb0b9abdb4 h1:OWsZlBMtkYhFrZJ9FzlvwIYs1N/JrPKTwyBk45TWLOU= +github.com/openshift/multi-operator-manager v0.0.0-20250930141021-05cb0b9abdb4/go.mod h1:gzGgjkInMrsF0XNUpVYQDo0LS6ojxFfrg2DkQbJy9lI= github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzbfBr3hhjoZnS66nKrHmduYNpc34ny7RK4z5/HM0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -190,6 +198,8 @@ github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfm github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= @@ -345,6 +355,8 @@ k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4= k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= k8s.io/apiserver v0.34.1 h1:U3JBGdgANK3dfFcyknWde1G6X1F4bg7PXuvlqt8lITA= k8s.io/apiserver v0.34.1/go.mod h1:eOOc9nrVqlBI1AFCvVzsob0OxtPZUCPiUJL45JOTBG0= +k8s.io/cli-runtime v0.30.2 h1:ooM40eEJusbgHNEqnHziN9ZpLN5U4WcQGsdLKVxpkKE= +k8s.io/cli-runtime v0.30.2/go.mod h1:Y4g/2XezFyTATQUbvV5WaChoUGhojv/jZAtdp5Zkm0A= k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY= k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8= k8s.io/component-base v0.34.1 h1:v7xFgG+ONhytZNFpIz5/kecwD+sUhVE6HU7qQUiRM4A= diff --git a/vendor/github.com/PaesslerAG/gval/.gitignore b/vendor/github.com/PaesslerAG/gval/.gitignore new file mode 100644 index 000000000..98576e300 --- /dev/null +++ b/vendor/github.com/PaesslerAG/gval/.gitignore @@ -0,0 +1,30 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + 
+*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +coverage.out + +manual_test.go +*.out +*.err + +.vscode \ No newline at end of file diff --git a/vendor/github.com/PaesslerAG/gval/.travis.yml b/vendor/github.com/PaesslerAG/gval/.travis.yml new file mode 100644 index 000000000..fdacb5857 --- /dev/null +++ b/vendor/github.com/PaesslerAG/gval/.travis.yml @@ -0,0 +1,12 @@ +language: go + +before_install: + - go get golang.org/x/tools/cmd/cover + - go get github.com/mattn/goveralls + +script: +- go test -bench=. -benchmem -timeout 9m -coverprofile coverage.out +- $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci -repotoken=$COVERALLS_TOKEN +- go test -bench=Random -benchtime 3m -timeout 9m -benchmem -coverprofile coverage.out + +go: "1.15" diff --git a/vendor/github.com/PaesslerAG/gval/LICENSE b/vendor/github.com/PaesslerAG/gval/LICENSE new file mode 100644 index 000000000..0716dbca1 --- /dev/null +++ b/vendor/github.com/PaesslerAG/gval/LICENSE @@ -0,0 +1,12 @@ +Copyright (c) 2017, Paessler AG +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/github.com/PaesslerAG/gval/README.md b/vendor/github.com/PaesslerAG/gval/README.md new file mode 100644 index 000000000..d7aff3f11 --- /dev/null +++ b/vendor/github.com/PaesslerAG/gval/README.md @@ -0,0 +1,163 @@ +# Gval + +[![Go Reference](https://pkg.go.dev/badge/github.com/PaesslerAG/gval.svg)](https://pkg.go.dev/github.com/PaesslerAG/gval) +[![Build Status](https://api.travis-ci.org/PaesslerAG/gval.svg?branch=master)](https://travis-ci.org/PaesslerAG/gval) +[![Coverage Status](https://coveralls.io/repos/github/PaesslerAG/gval/badge.svg?branch=master)](https://coveralls.io/github/PaesslerAG/gval?branch=master) +[![Go Report Card](https://goreportcard.com/badge/github.com/PaesslerAG/gval)](https://goreportcard.com/report/github.com/PaesslerAG/gval) + +Gval (Go eVALuate) provides support for evaluating arbitrary expressions, in particular Go-like expressions. 
+ +![gopher](./prtg-batmin-gopher.png) + +## Evaluate + +Gval can evaluate expressions with parameters, arimethetic, logical, and string operations: + +- basic expression: [10 > 0](https://pkg.go.dev/github.com/PaesslerAG/gval/#example-Evaluate-Basic) +- parameterized expression: [foo > 0](https://pkg.go.dev/github.com/PaesslerAG/gval/#example-Evaluate-Parameter) +- nested parameterized expression: [foo.bar > 0](https://pkg.go.dev/github.com/PaesslerAG/gval/#example-Evaluate-NestedParameter) +- arithmetic expression: [(requests_made * requests_succeeded / 100) >= 90](https://pkg.go.dev/github.com/PaesslerAG/gval/#example-Evaluate-Arithmetic) +- string expression: [http_response_body == "service is ok"](https://pkg.go.dev/github.com/PaesslerAG/gval/#example-Evaluate-String) +- float64 expression: [(mem_used / total_mem) * 100](https://pkg.go.dev/github.com/PaesslerAG/gval/#example-Evaluate-Float64) + +It can easily be extended with custom functions or operators: + +- custom date comparator: [date(\`2014-01-02\`) > date(\`2014-01-01 23:59:59\`)](https://pkg.go.dev/github.com/PaesslerAG/gval/#example-Evaluate-DateComparison) +- string length: [strlen("someReallyLongInputString") <= 16](https://pkg.go.dev/github.com/PaesslerAG/gval/#example-Evaluate-Strlen) + +You can parse gval.Expressions once and re-use them multiple times. Parsing is the compute-intensive phase of the process, so if you intend to use the same expression with different parameters, just parse it once: + +- [Parsing and Evaluation](https://pkg.go.dev/github.com/PaesslerAG/gval/#example-Evaluable) + +The normal Go-standard order of operators is respected. When writing an expression, be sure that you either order the operators correctly, or use parentheses to clarify which portions of an expression should be run first. + +Strings, numbers, and booleans can be used like in Go: + +- [(7 < "47" == true ? 
"hello world!\n\u263a") + \` more text\`](https://pkg.go.dev/github.com/PaesslerAG/gval/#example-Evaluate-Encoding) + +## Parameter + +Variables can be accessed via string literals. They can be used for values with string keys if the parameter is a `map[string]interface{}` or `map[interface{}]interface{}` and for fields or methods if the parameter is a struct. + +- [foo > 0](https://pkg.go.dev/github.com/PaesslerAG/gval/#example-Evaluate-Parameter) + +### Bracket Selector + +Map and array elements and Struct Field can be accessed via `[]`. + +- [foo[0]](https://pkg.go.dev/github.com/PaesslerAG/gval/#example-Evaluate-Array) +- [foo["b" + "a" + "r"]](https://pkg.go.dev/github.com/PaesslerAG/gval/#example-Evaluate-ExampleEvaluate_ComplexAccessor) + +### Dot Selector + +A nested variable with a name containing only letters and underscores can be accessed via a dot selector. + +- [foo.bar > 0](https://pkg.go.dev/github.com/PaesslerAG/gval/#example-Evaluate-NestedParameter) + +### Custom Selector + +Parameter names like `response-time` will be interpreted as `response` minus `time`. While gval doesn't support these parameter names directly, you can easily access them via a custom extension like [JSON Path](https://github.com/PaesslerAG/jsonpath): + +- [$["response-time"]](https://pkg.go.dev/github.com/PaesslerAG/gval/#example-Evaluate-Jsonpath) + +Jsonpath is also suitable for accessing array elements. 
+ +### Fields and Methods + +If you have structs in your parameters, you can access their fields and methods in the usual way: + +- [foo.Hello + foo.World()](https://pkg.go.dev/github.com/PaesslerAG/gval/#example-Evaluate-FlatAccessor) + +It also works if the parameter is a struct directly +[Hello + World()](https://pkg.go.dev/github.com/PaesslerAG/gval/#example-Evaluate-Accessor) +or if the fields are nested +[foo.Hello + foo.World()](https://pkg.go.dev/github.com/PaesslerAG/gval/#example-Evaluate-NestedAccessor) + +This may be convenient but note that using accessors on strucs makes the expression about four times slower than just using a parameter (consult the benchmarks for more precise measurements on your system). If there are functions you want to use, it's faster (and probably cleaner) to define them as functions (see the Evaluate section). These approaches use no reflection, and are designed to be fast and clean. + +## Default Language + +The default language is in serveral sub languages like text, arithmetic or propositional logic defined. See [Godoc](https://pkg.go.dev/github.com/PaesslerAG/gval/#Gval) for details. All sub languages are merged into gval.Full which contains the following elements: + +- Modifiers: `+` `-` `/` `*` `&` `|` `^` `**` `%` `>>` `<<` +- Comparators: `>` `>=` `<` `<=` `==` `!=` `=~` `!~` +- Logical ops: `||` `&&` +- Numeric constants, as 64-bit floating point (`12345.678`) +- String constants (double quotes: `"foobar"`) +- Date function 'Date(x)', using any permutation of RFC3339, ISO8601, ruby date, or unix date +- Boolean constants: `true` `false` +- Parentheses to control order of evaluation `(` `)` +- Json Arrays : `[1, 2, "foo"]` +- Json Objects : `{"a":1, "b":2, "c":"foo"}` +- Prefixes: `!` `-` `~` +- Ternary conditional: `?` `:` +- Null coalescence: `??` + +## Customize + +Gval is completly customizable. 
Every constant, function or operator can be defined separately and existing expression languages can be reused: + +- [foo.Hello + foo.World()](https://pkg.go.dev/github.com/PaesslerAG/gval/#example-Language) + +For details see [Godoc](https://pkg.go.dev/github.com/PaesslerAG/gval). + +### Implementing custom selector + +In a case you want to provide custom logic for selectors you can implement `SelectGVal(ctx context.Context, k string) (interface{}, error)` on your struct. +Function receives next part of the path and can return any type of var that is again evaluated through standard gval procedures. + +[Example Custom Selector](https://pkg.go.dev/github.com/PaesslerAG/gval/#example-custom-selector) + +### External gval Languages + +A list of external libraries for gval. Feel free to add your own library. + +- [gvalstrings](https://github.com/generikvault/gvalstrings) parse single quoted strings in gval. +- [jsonpath](https://github.com/PaesslerAG/jsonpath) full support for jsonpath in gval. + +## Performance + +The library is built with the intention of being quick but has not been aggressively profiled and optimized. For most applications, though, it is completely fine. +If performance is an issue, make sure to create your expression language with all functions, constants and operators only once. Evaluating an expression like gval.Evaluate("expression, const1, func1, func2, ...) creates a new gval.Language everytime it is called and slows execution. + +The library comes with a bunch of benchmarks to measure the performance of parsing and evaluating expressions. You can run them with `go test -bench=.`. + +For a very rough idea of performance, here are the results from a benchmark run on a Dell Latitude E7470 Win 10 i5-6300U. 
+ +``` text +BenchmarkGval/const_evaluation-4 500000000 3.57 ns/op +BenchmarkGval/const_parsing-4 1000000 1144 ns/op +BenchmarkGval/single_parameter_evaluation-4 10000000 165 ns/op +BenchmarkGval/single_parameter_parsing-4 1000000 1648 ns/op +BenchmarkGval/parameter_evaluation-4 5000000 352 ns/op +BenchmarkGval/parameter_parsing-4 500000 2773 ns/op +BenchmarkGval/common_evaluation-4 3000000 434 ns/op +BenchmarkGval/common_parsing-4 300000 4419 ns/op +BenchmarkGval/complex_evaluation-4 100000000 11.6 ns/op +BenchmarkGval/complex_parsing-4 100000 17936 ns/op +BenchmarkGval/literal_evaluation-4 300000000 3.84 ns/op +BenchmarkGval/literal_parsing-4 500000 2559 ns/op +BenchmarkGval/modifier_evaluation-4 500000000 3.54 ns/op +BenchmarkGval/modifier_parsing-4 500000 3755 ns/op +BenchmarkGval/regex_evaluation-4 50000 21347 ns/op +BenchmarkGval/regex_parsing-4 200000 6480 ns/op +BenchmarkGval/constant_regex_evaluation-4 1000000 1000 ns/op +BenchmarkGval/constant_regex_parsing-4 200000 9417 ns/op +BenchmarkGval/accessors_evaluation-4 3000000 417 ns/op +BenchmarkGval/accessors_parsing-4 1000000 1778 ns/op +BenchmarkGval/accessors_method_evaluation-4 1000000 1931 ns/op +BenchmarkGval/accessors_method_parsing-4 1000000 1729 ns/op +BenchmarkGval/accessors_method_parameter_evaluation-4 1000000 2162 ns/op +BenchmarkGval/accessors_method_parameter_parsing-4 500000 2618 ns/op +BenchmarkGval/nested_accessors_evaluation-4 2000000 681 ns/op +BenchmarkGval/nested_accessors_parsing-4 1000000 2115 ns/op +BenchmarkRandom-4 500000 3631 ns/op +ok +``` + +## API Breaks + +Gval is designed with easy expandability in mind and API breaks will be avoided if possible. If API breaks are unavoidable they wil be explicitly stated via an increased major version number. + +------------------------------------- +Credits to Reene French for the gophers. 
diff --git a/vendor/github.com/PaesslerAG/gval/evaluable.go b/vendor/github.com/PaesslerAG/gval/evaluable.go new file mode 100644 index 000000000..ea857ad68 --- /dev/null +++ b/vendor/github.com/PaesslerAG/gval/evaluable.go @@ -0,0 +1,363 @@ +package gval + +import ( + "context" + "fmt" + "reflect" + "regexp" + "strconv" + "strings" +) + +// Selector allows for custom variable selection from structs +// +// Return value is again handled with variable() until end of the given path +type Selector interface { + SelectGVal(c context.Context, key string) (interface{}, error) +} + +// Evaluable evaluates given parameter +type Evaluable func(c context.Context, parameter interface{}) (interface{}, error) + +// EvalInt evaluates given parameter to an int +func (e Evaluable) EvalInt(c context.Context, parameter interface{}) (int, error) { + v, err := e(c, parameter) + if err != nil { + return 0, err + } + + f, ok := convertToFloat(v) + if !ok { + return 0, fmt.Errorf("expected number but got %v (%T)", v, v) + } + return int(f), nil +} + +// EvalFloat64 evaluates given parameter to a float64 +func (e Evaluable) EvalFloat64(c context.Context, parameter interface{}) (float64, error) { + v, err := e(c, parameter) + if err != nil { + return 0, err + } + + f, ok := convertToFloat(v) + if !ok { + return 0, fmt.Errorf("expected number but got %v (%T)", v, v) + } + return f, nil +} + +// EvalBool evaluates given parameter to a bool +func (e Evaluable) EvalBool(c context.Context, parameter interface{}) (bool, error) { + v, err := e(c, parameter) + if err != nil { + return false, err + } + + b, ok := convertToBool(v) + if !ok { + return false, fmt.Errorf("expected bool but got %v (%T)", v, v) + } + return b, nil +} + +// EvalString evaluates given parameter to a string +func (e Evaluable) EvalString(c context.Context, parameter interface{}) (string, error) { + o, err := e(c, parameter) + if err != nil { + return "", err + } + return fmt.Sprintf("%v", o), nil +} + +// Const Evaluable 
represents given constant +func (*Parser) Const(value interface{}) Evaluable { + return constant(value) +} + +//go:noinline +func constant(value interface{}) Evaluable { + return func(c context.Context, v interface{}) (interface{}, error) { + return value, nil + } +} + +// Var Evaluable represents value at given path. +// It supports with default language VariableSelector: +// +// map[interface{}]interface{}, +// map[string]interface{} and +// []interface{} and via reflect +// struct fields, +// struct methods, +// slices and +// map with int or string key. +func (p *Parser) Var(path ...Evaluable) Evaluable { + if p.selector == nil { + return variable(path) + } + return p.selector(path) +} + +// Evaluables is a slice of Evaluable. +type Evaluables []Evaluable + +// EvalStrings evaluates given parameter to a string slice +func (evs Evaluables) EvalStrings(c context.Context, parameter interface{}) ([]string, error) { + strs := make([]string, len(evs)) + for i, p := range evs { + k, err := p.EvalString(c, parameter) + if err != nil { + return nil, err + } + strs[i] = k + } + return strs, nil +} + +func variable(path Evaluables) Evaluable { + return func(c context.Context, v interface{}) (interface{}, error) { + keys, err := path.EvalStrings(c, v) + if err != nil { + return nil, err + } + for i, k := range keys { + switch o := v.(type) { + case Selector: + v, err = o.SelectGVal(c, k) + if err != nil { + return nil, fmt.Errorf("failed to select '%s' on %T: %w", k, o, err) + } + continue + case map[interface{}]interface{}: + v = o[k] + continue + case map[string]interface{}: + v = o[k] + continue + case []interface{}: + if i, err := strconv.Atoi(k); err == nil && i >= 0 && len(o) > i { + v = o[i] + continue + } + default: + var ok bool + v, ok = reflectSelect(k, o) + if !ok { + return nil, fmt.Errorf("unknown parameter %s", strings.Join(keys[:i+1], ".")) + } + } + } + return v, nil + } +} + +func reflectSelect(key string, value interface{}) (selection interface{}, ok 
bool) { + vv := reflect.ValueOf(value) + vvElem := resolvePotentialPointer(vv) + + switch vvElem.Kind() { + case reflect.Map: + mapKey, ok := reflectConvertTo(vv.Type().Key().Kind(), key) + if !ok { + return nil, false + } + + vvElem = vv.MapIndex(reflect.ValueOf(mapKey)) + vvElem = resolvePotentialPointer(vvElem) + + if vvElem.IsValid() { + return vvElem.Interface(), true + } + + // key didn't exist. Check if there is a bound method + method := vv.MethodByName(key) + if method.IsValid() { + return method.Interface(), true + } + + case reflect.Slice: + if i, err := strconv.Atoi(key); err == nil && i >= 0 && vv.Len() > i { + vvElem = resolvePotentialPointer(vv.Index(i)) + return vvElem.Interface(), true + } + + // key not an int. Check if there is a bound method + method := vv.MethodByName(key) + if method.IsValid() { + return method.Interface(), true + } + + case reflect.Struct: + field := vvElem.FieldByName(key) + if field.IsValid() { + return field.Interface(), true + } + + method := vv.MethodByName(key) + if method.IsValid() { + return method.Interface(), true + } + } + return nil, false +} + +func resolvePotentialPointer(value reflect.Value) reflect.Value { + if value.Kind() == reflect.Ptr { + return value.Elem() + } + return value +} + +func reflectConvertTo(k reflect.Kind, value string) (interface{}, bool) { + switch k { + case reflect.String: + return value, true + case reflect.Int: + if i, err := strconv.Atoi(value); err == nil { + return i, true + } + } + return nil, false +} + +func (*Parser) callFunc(fun function, args ...Evaluable) Evaluable { + return func(c context.Context, v interface{}) (ret interface{}, err error) { + a := make([]interface{}, len(args)) + for i, arg := range args { + ai, err := arg(c, v) + if err != nil { + return nil, err + } + a[i] = ai + } + return fun(c, a...) 
+ } +} + +func (*Parser) callEvaluable(fullname string, fun Evaluable, args ...Evaluable) Evaluable { + return func(c context.Context, v interface{}) (ret interface{}, err error) { + f, err := fun(c, v) + + if err != nil { + return nil, fmt.Errorf("could not call function: %w", err) + } + + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("failed to execute function '%s': %s", fullname, r) + ret = nil + } + }() + + ff := reflect.ValueOf(f) + + if ff.Kind() != reflect.Func { + return nil, fmt.Errorf("could not call '%s' type %T", fullname, f) + } + + a := make([]reflect.Value, len(args)) + for i := range args { + arg, err := args[i](c, v) + if err != nil { + return nil, err + } + a[i] = reflect.ValueOf(arg) + } + + rr := ff.Call(a) + + r := make([]interface{}, len(rr)) + for i, e := range rr { + r[i] = e.Interface() + } + + errorInterface := reflect.TypeOf((*error)(nil)).Elem() + if len(r) > 0 && ff.Type().Out(len(r)-1).Implements(errorInterface) { + if r[len(r)-1] != nil { + err = r[len(r)-1].(error) + } + r = r[0 : len(r)-1] + } + + switch len(r) { + case 0: + return err, nil + case 1: + return r[0], err + default: + return r, err + } + } +} + +// IsConst returns if the Evaluable is a Parser.Const() value +func (e Evaluable) IsConst() bool { + pc := reflect.ValueOf(constant(nil)).Pointer() + pe := reflect.ValueOf(e).Pointer() + return pc == pe +} + +func regEx(a, b Evaluable) (Evaluable, error) { + if !b.IsConst() { + return func(c context.Context, o interface{}) (interface{}, error) { + a, err := a.EvalString(c, o) + if err != nil { + return nil, err + } + b, err := b.EvalString(c, o) + if err != nil { + return nil, err + } + matched, err := regexp.MatchString(b, a) + return matched, err + }, nil + } + s, err := b.EvalString(context.TODO(), nil) + if err != nil { + return nil, err + } + regex, err := regexp.Compile(s) + if err != nil { + return nil, err + } + return func(c context.Context, v interface{}) (interface{}, error) { + s, err := 
a.EvalString(c, v) + if err != nil { + return nil, err + } + return regex.MatchString(s), nil + }, nil +} + +func notRegEx(a, b Evaluable) (Evaluable, error) { + if !b.IsConst() { + return func(c context.Context, o interface{}) (interface{}, error) { + a, err := a.EvalString(c, o) + if err != nil { + return nil, err + } + b, err := b.EvalString(c, o) + if err != nil { + return nil, err + } + matched, err := regexp.MatchString(b, a) + return !matched, err + }, nil + } + s, err := b.EvalString(context.TODO(), nil) + if err != nil { + return nil, err + } + regex, err := regexp.Compile(s) + if err != nil { + return nil, err + } + return func(c context.Context, v interface{}) (interface{}, error) { + s, err := a.EvalString(c, v) + if err != nil { + return nil, err + } + return !regex.MatchString(s), nil + }, nil +} diff --git a/vendor/github.com/PaesslerAG/gval/functions.go b/vendor/github.com/PaesslerAG/gval/functions.go new file mode 100644 index 000000000..ca77b647d --- /dev/null +++ b/vendor/github.com/PaesslerAG/gval/functions.go @@ -0,0 +1,128 @@ +package gval + +import ( + "context" + "fmt" + "reflect" +) + +type function func(ctx context.Context, arguments ...interface{}) (interface{}, error) + +func toFunc(f interface{}) function { + if f, ok := f.(func(arguments ...interface{}) (interface{}, error)); ok { + return function(func(ctx context.Context, arguments ...interface{}) (interface{}, error) { + var v interface{} + errCh := make(chan error, 1) + go func() { + defer func() { + if recovered := recover(); recovered != nil { + errCh <- fmt.Errorf("%v", recovered) + } + }() + result, err := f(arguments...) 
+ v = result + errCh <- err + }() + + select { + case <-ctx.Done(): + return nil, ctx.Err() + case err := <-errCh: + close(errCh) + return v, err + } + }) + } + if f, ok := f.(func(ctx context.Context, arguments ...interface{}) (interface{}, error)); ok { + return function(f) + } + + fun := reflect.ValueOf(f) + t := fun.Type() + return func(ctx context.Context, args ...interface{}) (interface{}, error) { + var v interface{} + errCh := make(chan error, 1) + go func() { + defer func() { + if recovered := recover(); recovered != nil { + errCh <- fmt.Errorf("%v", recovered) + } + }() + in, err := createCallArguments(ctx, t, args) + if err != nil { + errCh <- err + return + } + out := fun.Call(in) + + r := make([]interface{}, len(out)) + for i, e := range out { + r[i] = e.Interface() + } + + err = nil + errorInterface := reflect.TypeOf((*error)(nil)).Elem() + if len(r) > 0 && t.Out(len(r)-1).Implements(errorInterface) { + if r[len(r)-1] != nil { + err = r[len(r)-1].(error) + } + r = r[0 : len(r)-1] + } + + switch len(r) { + case 0: + v = nil + case 1: + v = r[0] + default: + v = r + } + errCh <- err + }() + + select { + case <-ctx.Done(): + return nil, ctx.Err() + case err := <-errCh: + close(errCh) + return v, err + } + } +} + +func createCallArguments(ctx context.Context, t reflect.Type, args []interface{}) ([]reflect.Value, error) { + variadic := t.IsVariadic() + numIn := t.NumIn() + + // if first argument is a context, use the given execution context + if numIn > 0 { + thisFun := reflect.ValueOf(createCallArguments) + thisT := thisFun.Type() + if t.In(0) == thisT.In(0) { + args = append([]interface{}{ctx}, args...) 
+ } + } + + if (!variadic && len(args) != numIn) || (variadic && len(args) < numIn-1) { + return nil, fmt.Errorf("invalid number of parameters") + } + + in := make([]reflect.Value, len(args)) + var inType reflect.Type + for i, arg := range args { + if !variadic || i < numIn-1 { + inType = t.In(i) + } else if i == numIn-1 { + inType = t.In(numIn - 1).Elem() + } + argVal := reflect.ValueOf(arg) + if arg == nil { + argVal = reflect.ValueOf(reflect.Interface) + } else if !argVal.Type().AssignableTo(inType) { + return nil, fmt.Errorf("expected type %s for parameter %d but got %T", + inType.String(), i, arg) + } + in[i] = argVal + } + return in, nil +} diff --git a/vendor/github.com/PaesslerAG/gval/gval.go b/vendor/github.com/PaesslerAG/gval/gval.go new file mode 100644 index 000000000..af42142fd --- /dev/null +++ b/vendor/github.com/PaesslerAG/gval/gval.go @@ -0,0 +1,336 @@ +// Package gval provides a generic expression language. +// All functions, infix and prefix operators can be replaced by composing languages into a new one. +// +// The package contains concrete expression languages for common application in text, arithmetic, decimal arithmetic, propositional logic and so on. +// They can be used as basis for a custom expression language or to evaluate expressions directly. +package gval + +import ( + "context" + "fmt" + "math" + "reflect" + "text/scanner" + "time" + + "github.com/shopspring/decimal" +) + +// Evaluate given parameter with given expression in gval full language +func Evaluate(expression string, parameter interface{}, opts ...Language) (interface{}, error) { + return EvaluateWithContext(context.Background(), expression, parameter, opts...) +} + +// Evaluate given parameter with given expression in gval full language using a context +func EvaluateWithContext(c context.Context, expression string, parameter interface{}, opts ...Language) (interface{}, error) { + l := full + if len(opts) > 0 { + l = NewLanguage(append([]Language{l}, opts...)...) 
+ } + return l.EvaluateWithContext(c, expression, parameter) +} + +// Full is the union of Arithmetic, Bitmask, Text, PropositionalLogic, TernaryOperator, and Json +// +// Operator in: a in b is true iff value a is an element of array b +// Operator ??: a ?? b returns a if a is not false or nil, otherwise n +// +// Function Date: Date(a) parses string a. a must match RFC3339, ISO8601, ruby date, or unix date +func Full(extensions ...Language) Language { + if len(extensions) == 0 { + return full + } + return NewLanguage(append([]Language{full}, extensions...)...) +} + +// TernaryOperator contains following Operator +// +// ?: a ? b : c returns b if bool a is true, otherwise b +func TernaryOperator() Language { + return ternaryOperator +} + +// Arithmetic contains base, plus(+), minus(-), divide(/), power(**), negative(-) +// and numerical order (<=,<,>,>=) +// +// Arithmetic operators expect float64 operands. +// Called with unfitting input, they try to convert the input to float64. +// They can parse strings and convert any type of int or float. +func Arithmetic() Language { + return arithmetic +} + +// DecimalArithmetic contains base, plus(+), minus(-), divide(/), power(**), negative(-) +// and numerical order (<=,<,>,>=) +// +// DecimalArithmetic operators expect decimal.Decimal operands (github.com/shopspring/decimal) +// and are used to calculate money/decimal rather than floating point calculations. +// Called with unfitting input, they try to convert the input to decimal.Decimal. +// They can parse strings and convert any type of int or float. +func DecimalArithmetic() Language { + return decimalArithmetic +} + +// Bitmask contains base, bitwise and(&), bitwise or(|) and bitwise not(^). +// +// Bitmask operators expect float64 operands. +// Called with unfitting input they try to convert the input to float64. +// They can parse strings and convert any type of int or float. 
+func Bitmask() Language { + return bitmask +} + +// Text contains base, lexical order on strings (<=,<,>,>=), +// regex match (=~) and regex not match (!~) +func Text() Language { + return text +} + +// PropositionalLogic contains base, not(!), and (&&), or (||) and Base. +// +// Propositional operator expect bool operands. +// Called with unfitting input they try to convert the input to bool. +// Numbers other than 0 and the strings "TRUE" and "true" are interpreted as true. +// 0 and the strings "FALSE" and "false" are interpreted as false. +func PropositionalLogic() Language { + return propositionalLogic +} + +// JSON contains json objects ({string:expression,...}) +// and json arrays ([expression, ...]) +func JSON() Language { + return ljson +} + +// Parentheses contains support for parentheses. +func Parentheses() Language { + return parentheses +} + +// Ident contains support for variables and functions. +func Ident() Language { + return ident +} + +// Base contains equal (==) and not equal (!=), perentheses and general support for variables, constants and functions +// It contains true, false, (floating point) number, string ("" or “) and char (”) constants +func Base() Language { + return base +} + +var full = NewLanguage(arithmetic, bitmask, text, propositionalLogic, ljson, + + InfixOperator("in", inArray), + + InfixShortCircuit("??", func(a interface{}) (interface{}, bool) { + v := reflect.ValueOf(a) + return a, a != nil && !v.IsZero() + }), + InfixOperator("??", func(a, b interface{}) (interface{}, error) { + if v := reflect.ValueOf(a); a == nil || v.IsZero() { + return b, nil + } + return a, nil + }), + + ternaryOperator, + + Function("date", func(arguments ...interface{}) (interface{}, error) { + if len(arguments) != 1 { + return nil, fmt.Errorf("date() expects exactly one string argument") + } + s, ok := arguments[0].(string) + if !ok { + return nil, fmt.Errorf("date() expects exactly one string argument") + } + for _, format := range [...]string{ + 
time.ANSIC, + time.UnixDate, + time.RubyDate, + time.Kitchen, + time.RFC3339, + time.RFC3339Nano, + "2006-01-02", // RFC 3339 + "2006-01-02 15:04", // RFC 3339 with minutes + "2006-01-02 15:04:05", // RFC 3339 with seconds + "2006-01-02 15:04:05-07:00", // RFC 3339 with seconds and timezone + "2006-01-02T15Z0700", // ISO8601 with hour + "2006-01-02T15:04Z0700", // ISO8601 with minutes + "2006-01-02T15:04:05Z0700", // ISO8601 with seconds + "2006-01-02T15:04:05.999999999Z0700", // ISO8601 with nanoseconds + } { + ret, err := time.ParseInLocation(format, s, time.Local) + if err == nil { + return ret, nil + } + } + return nil, fmt.Errorf("date() could not parse %s", s) + }), +) + +var ternaryOperator = PostfixOperator("?", parseIf) + +var ljson = NewLanguage( + PrefixExtension('[', parseJSONArray), + PrefixExtension('{', parseJSONObject), +) + +var arithmetic = NewLanguage( + InfixNumberOperator("+", func(a, b float64) (interface{}, error) { return a + b, nil }), + InfixNumberOperator("-", func(a, b float64) (interface{}, error) { return a - b, nil }), + InfixNumberOperator("*", func(a, b float64) (interface{}, error) { return a * b, nil }), + InfixNumberOperator("/", func(a, b float64) (interface{}, error) { return a / b, nil }), + InfixNumberOperator("%", func(a, b float64) (interface{}, error) { return math.Mod(a, b), nil }), + InfixNumberOperator("**", func(a, b float64) (interface{}, error) { return math.Pow(a, b), nil }), + + InfixNumberOperator(">", func(a, b float64) (interface{}, error) { return a > b, nil }), + InfixNumberOperator(">=", func(a, b float64) (interface{}, error) { return a >= b, nil }), + InfixNumberOperator("<", func(a, b float64) (interface{}, error) { return a < b, nil }), + InfixNumberOperator("<=", func(a, b float64) (interface{}, error) { return a <= b, nil }), + + InfixNumberOperator("==", func(a, b float64) (interface{}, error) { return a == b, nil }), + InfixNumberOperator("!=", func(a, b float64) (interface{}, error) { return a != b, 
nil }), + + base, +) + +var decimalArithmetic = NewLanguage( + InfixDecimalOperator("+", func(a, b decimal.Decimal) (interface{}, error) { return a.Add(b), nil }), + InfixDecimalOperator("-", func(a, b decimal.Decimal) (interface{}, error) { return a.Sub(b), nil }), + InfixDecimalOperator("*", func(a, b decimal.Decimal) (interface{}, error) { return a.Mul(b), nil }), + InfixDecimalOperator("/", func(a, b decimal.Decimal) (interface{}, error) { return a.Div(b), nil }), + InfixDecimalOperator("%", func(a, b decimal.Decimal) (interface{}, error) { return a.Mod(b), nil }), + InfixDecimalOperator("**", func(a, b decimal.Decimal) (interface{}, error) { return a.Pow(b), nil }), + + InfixDecimalOperator(">", func(a, b decimal.Decimal) (interface{}, error) { return a.GreaterThan(b), nil }), + InfixDecimalOperator(">=", func(a, b decimal.Decimal) (interface{}, error) { return a.GreaterThanOrEqual(b), nil }), + InfixDecimalOperator("<", func(a, b decimal.Decimal) (interface{}, error) { return a.LessThan(b), nil }), + InfixDecimalOperator("<=", func(a, b decimal.Decimal) (interface{}, error) { return a.LessThanOrEqual(b), nil }), + + InfixDecimalOperator("==", func(a, b decimal.Decimal) (interface{}, error) { return a.Equal(b), nil }), + InfixDecimalOperator("!=", func(a, b decimal.Decimal) (interface{}, error) { return !a.Equal(b), nil }), + base, + //Base is before these overrides so that the Base options are overridden + PrefixExtension(scanner.Int, parseDecimal), + PrefixExtension(scanner.Float, parseDecimal), + PrefixOperator("-", func(c context.Context, v interface{}) (interface{}, error) { + i, ok := convertToFloat(v) + if !ok { + return nil, fmt.Errorf("unexpected %v(%T) expected number", v, v) + } + return decimal.NewFromFloat(i).Neg(), nil + }), +) + +var bitmask = NewLanguage( + InfixNumberOperator("^", func(a, b float64) (interface{}, error) { return float64(int64(a) ^ int64(b)), nil }), + InfixNumberOperator("&", func(a, b float64) (interface{}, error) { return 
float64(int64(a) & int64(b)), nil }), + InfixNumberOperator("|", func(a, b float64) (interface{}, error) { return float64(int64(a) | int64(b)), nil }), + InfixNumberOperator("<<", func(a, b float64) (interface{}, error) { return float64(int64(a) << uint64(b)), nil }), + InfixNumberOperator(">>", func(a, b float64) (interface{}, error) { return float64(int64(a) >> uint64(b)), nil }), + + PrefixOperator("~", func(c context.Context, v interface{}) (interface{}, error) { + i, ok := convertToFloat(v) + if !ok { + return nil, fmt.Errorf("unexpected %T expected number", v) + } + return float64(^int64(i)), nil + }), +) + +var text = NewLanguage( + InfixTextOperator("+", func(a, b string) (interface{}, error) { return fmt.Sprintf("%v%v", a, b), nil }), + + InfixTextOperator("<", func(a, b string) (interface{}, error) { return a < b, nil }), + InfixTextOperator("<=", func(a, b string) (interface{}, error) { return a <= b, nil }), + InfixTextOperator(">", func(a, b string) (interface{}, error) { return a > b, nil }), + InfixTextOperator(">=", func(a, b string) (interface{}, error) { return a >= b, nil }), + + InfixEvalOperator("=~", regEx), + InfixEvalOperator("!~", notRegEx), + base, +) + +var propositionalLogic = NewLanguage( + PrefixOperator("!", func(c context.Context, v interface{}) (interface{}, error) { + b, ok := convertToBool(v) + if !ok { + return nil, fmt.Errorf("unexpected %T expected bool", v) + } + return !b, nil + }), + + InfixShortCircuit("&&", func(a interface{}) (interface{}, bool) { return false, a == false }), + InfixBoolOperator("&&", func(a, b bool) (interface{}, error) { return a && b, nil }), + InfixShortCircuit("||", func(a interface{}) (interface{}, bool) { return true, a == true }), + InfixBoolOperator("||", func(a, b bool) (interface{}, error) { return a || b, nil }), + + InfixBoolOperator("==", func(a, b bool) (interface{}, error) { return a == b, nil }), + InfixBoolOperator("!=", func(a, b bool) (interface{}, error) { return a != b, nil }), + + 
base, +) + +var parentheses = NewLanguage( + PrefixExtension('(', parseParentheses), +) + +var ident = NewLanguage( + PrefixMetaPrefix(scanner.Ident, parseIdent), +) + +var base = NewLanguage( + PrefixExtension(scanner.Int, parseNumber), + PrefixExtension(scanner.Float, parseNumber), + PrefixOperator("-", func(c context.Context, v interface{}) (interface{}, error) { + i, ok := convertToFloat(v) + if !ok { + return nil, fmt.Errorf("unexpected %v(%T) expected number", v, v) + } + return -i, nil + }), + + PrefixExtension(scanner.String, parseString), + PrefixExtension(scanner.Char, parseString), + PrefixExtension(scanner.RawString, parseString), + + Constant("true", true), + Constant("false", false), + + InfixOperator("==", func(a, b interface{}) (interface{}, error) { return reflect.DeepEqual(a, b), nil }), + InfixOperator("!=", func(a, b interface{}) (interface{}, error) { return !reflect.DeepEqual(a, b), nil }), + parentheses, + + Precedence("??", 0), + + Precedence("||", 20), + Precedence("&&", 21), + + Precedence("==", 40), + Precedence("!=", 40), + Precedence(">", 40), + Precedence(">=", 40), + Precedence("<", 40), + Precedence("<=", 40), + Precedence("=~", 40), + Precedence("!~", 40), + Precedence("in", 40), + + Precedence("^", 60), + Precedence("&", 60), + Precedence("|", 60), + + Precedence("<<", 90), + Precedence(">>", 90), + + Precedence("+", 120), + Precedence("-", 120), + + Precedence("*", 150), + Precedence("/", 150), + Precedence("%", 150), + + Precedence("**", 200), + + ident, +) diff --git a/vendor/github.com/PaesslerAG/gval/language.go b/vendor/github.com/PaesslerAG/gval/language.go new file mode 100644 index 000000000..ddaefa362 --- /dev/null +++ b/vendor/github.com/PaesslerAG/gval/language.go @@ -0,0 +1,281 @@ +package gval + +import ( + "context" + "fmt" + "text/scanner" + "unicode" + + "github.com/shopspring/decimal" +) + +// Language is an expression language +type Language struct { + prefixes map[interface{}]extension + operators 
map[string]operator + operatorSymbols map[rune]struct{} + init extension + def extension + selector func(Evaluables) Evaluable +} + +// NewLanguage returns the union of given Languages as new Language. +func NewLanguage(bases ...Language) Language { + l := newLanguage() + for _, base := range bases { + for i, e := range base.prefixes { + l.prefixes[i] = e + } + for i, e := range base.operators { + l.operators[i] = e.merge(l.operators[i]) + l.operators[i].initiate(i) + } + for i := range base.operatorSymbols { + l.operatorSymbols[i] = struct{}{} + } + if base.init != nil { + l.init = base.init + } + if base.def != nil { + l.def = base.def + } + if base.selector != nil { + l.selector = base.selector + } + } + return l +} + +func newLanguage() Language { + return Language{ + prefixes: map[interface{}]extension{}, + operators: map[string]operator{}, + operatorSymbols: map[rune]struct{}{}, + } +} + +// NewEvaluable returns an Evaluable for given expression in the specified language +func (l Language) NewEvaluable(expression string) (Evaluable, error) { + return l.NewEvaluableWithContext(context.Background(), expression) +} + +// NewEvaluableWithContext returns an Evaluable for given expression in the specified language using context +func (l Language) NewEvaluableWithContext(c context.Context, expression string) (Evaluable, error) { + p := newParser(expression, l) + + eval, err := p.parse(c) + if err == nil && p.isCamouflaged() && p.lastScan != scanner.EOF { + err = p.camouflage + } + if err != nil { + pos := p.scanner.Pos() + return nil, fmt.Errorf("parsing error: %s - %d:%d %w", p.scanner.Position, pos.Line, pos.Column, err) + } + + return eval, nil +} + +// Evaluate given parameter with given expression +func (l Language) Evaluate(expression string, parameter interface{}) (interface{}, error) { + return l.EvaluateWithContext(context.Background(), expression, parameter) +} + +// Evaluate given parameter with given expression using context +func (l Language) 
EvaluateWithContext(c context.Context, expression string, parameter interface{}) (interface{}, error) { + eval, err := l.NewEvaluableWithContext(c, expression) + if err != nil { + return nil, err + } + v, err := eval(c, parameter) + if err != nil { + return nil, fmt.Errorf("can not evaluate %s: %w", expression, err) + } + return v, nil +} + +// Function returns a Language with given function. +// Function has no conversion for input types. +// +// If the function returns an error it must be the last return parameter. +// +// If the function has (without the error) more then one return parameter, +// it returns them as []interface{}. +func Function(name string, function interface{}) Language { + l := newLanguage() + l.prefixes[name] = func(c context.Context, p *Parser) (eval Evaluable, err error) { + args := []Evaluable{} + scan := p.Scan() + switch scan { + case '(': + args, err = p.parseArguments(c) + if err != nil { + return nil, err + } + default: + p.Camouflage("function call", '(') + } + return p.callFunc(toFunc(function), args...), nil + } + return l +} + +// Constant returns a Language with given constant +func Constant(name string, value interface{}) Language { + l := newLanguage() + l.prefixes[l.makePrefixKey(name)] = func(c context.Context, p *Parser) (eval Evaluable, err error) { + return p.Const(value), nil + } + return l +} + +// PrefixExtension extends a Language +func PrefixExtension(r rune, ext func(context.Context, *Parser) (Evaluable, error)) Language { + l := newLanguage() + l.prefixes[r] = ext + return l +} + +// Init is a language that does no parsing, but invokes the given function when +// parsing starts. It is incumbent upon the function to call ParseExpression to +// continue parsing. +// +// This function can be used to customize the parser settings, such as +// whitespace or ident behavior. 
+func Init(ext func(context.Context, *Parser) (Evaluable, error)) Language { + l := newLanguage() + l.init = ext + return l +} + +// DefaultExtension is a language that runs the given function if no other +// prefix matches. +func DefaultExtension(ext func(context.Context, *Parser) (Evaluable, error)) Language { + l := newLanguage() + l.def = ext + return l +} + +// PrefixMetaPrefix chooses a Prefix to be executed +func PrefixMetaPrefix(r rune, ext func(context.Context, *Parser) (call string, alternative func() (Evaluable, error), err error)) Language { + l := newLanguage() + l.prefixes[r] = func(c context.Context, p *Parser) (Evaluable, error) { + call, alternative, err := ext(c, p) + if err != nil { + return nil, err + } + if prefix, ok := p.prefixes[l.makePrefixKey(call)]; ok { + return prefix(c, p) + } + return alternative() + } + return l +} + +// PrefixOperator returns a Language with given prefix +func PrefixOperator(name string, e Evaluable) Language { + l := newLanguage() + l.prefixes[l.makePrefixKey(name)] = func(c context.Context, p *Parser) (Evaluable, error) { + eval, err := p.ParseNextExpression(c) + if err != nil { + return nil, err + } + prefix := func(c context.Context, v interface{}) (interface{}, error) { + a, err := eval(c, v) + if err != nil { + return nil, err + } + return e(c, a) + } + if eval.IsConst() { + v, err := prefix(c, nil) + if err != nil { + return nil, err + } + prefix = p.Const(v) + } + return prefix, nil + } + return l +} + +// PostfixOperator extends a Language. +func PostfixOperator(name string, ext func(context.Context, *Parser, Evaluable) (Evaluable, error)) Language { + l := newLanguage() + l.operators[l.makeInfixKey(name)] = postfix{ + f: func(c context.Context, p *Parser, eval Evaluable, pre operatorPrecedence) (Evaluable, error) { + return ext(c, p, eval) + }, + } + return l +} + +// InfixOperator for two arbitrary values. 
+func InfixOperator(name string, f func(a, b interface{}) (interface{}, error)) Language { + return newLanguageOperator(name, &infix{arbitrary: f}) +} + +// InfixShortCircuit operator is called after the left operand is evaluated. +func InfixShortCircuit(name string, f func(a interface{}) (interface{}, bool)) Language { + return newLanguageOperator(name, &infix{shortCircuit: f}) +} + +// InfixTextOperator for two text values. +func InfixTextOperator(name string, f func(a, b string) (interface{}, error)) Language { + return newLanguageOperator(name, &infix{text: f}) +} + +// InfixNumberOperator for two number values. +func InfixNumberOperator(name string, f func(a, b float64) (interface{}, error)) Language { + return newLanguageOperator(name, &infix{number: f}) +} + +// InfixDecimalOperator for two decimal values. +func InfixDecimalOperator(name string, f func(a, b decimal.Decimal) (interface{}, error)) Language { + return newLanguageOperator(name, &infix{decimal: f}) +} + +// InfixBoolOperator for two bool values. +func InfixBoolOperator(name string, f func(a, b bool) (interface{}, error)) Language { + return newLanguageOperator(name, &infix{boolean: f}) +} + +// Precedence of operator. The Operator with higher operatorPrecedence is evaluated first. +func Precedence(name string, operatorPrecendence uint8) Language { + return newLanguageOperator(name, operatorPrecedence(operatorPrecendence)) +} + +// InfixEvalOperator operates on the raw operands. +// Therefore it cannot be combined with operators for other operand types. 
+func InfixEvalOperator(name string, f func(a, b Evaluable) (Evaluable, error)) Language { + return newLanguageOperator(name, directInfix{infixBuilder: f}) +} + +func newLanguageOperator(name string, op operator) Language { + op.initiate(name) + l := newLanguage() + l.operators[l.makeInfixKey(name)] = op + return l +} + +func (l *Language) makePrefixKey(key string) interface{} { + runes := []rune(key) + if len(runes) == 1 && !unicode.IsLetter(runes[0]) { + return runes[0] + } + return key +} + +func (l *Language) makeInfixKey(key string) string { + for _, r := range key { + l.operatorSymbols[r] = struct{}{} + } + return key +} + +// VariableSelector returns a Language which uses given variable selector. +// It must be combined with a Language that uses the vatiable selector. E.g. gval.Base(). +func VariableSelector(selector func(path Evaluables) Evaluable) Language { + l := newLanguage() + l.selector = selector + return l +} diff --git a/vendor/github.com/PaesslerAG/gval/operator.go b/vendor/github.com/PaesslerAG/gval/operator.go new file mode 100644 index 000000000..8e584240f --- /dev/null +++ b/vendor/github.com/PaesslerAG/gval/operator.go @@ -0,0 +1,405 @@ +package gval + +import ( + "context" + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/shopspring/decimal" +) + +type stage struct { + Evaluable + infixBuilder + operatorPrecedence +} + +type stageStack []stage //operatorPrecedence in stacktStage is continuously, monotone ascending + +func (s *stageStack) push(b stage) error { + for len(*s) > 0 && s.peek().operatorPrecedence >= b.operatorPrecedence { + a := s.pop() + eval, err := a.infixBuilder(a.Evaluable, b.Evaluable) + if err != nil { + return err + } + if a.IsConst() && b.IsConst() { + v, err := eval(nil, nil) + if err != nil { + return err + } + b.Evaluable = constant(v) + continue + } + b.Evaluable = eval + } + *s = append(*s, b) + return nil +} + +func (s *stageStack) peek() stage { + return (*s)[len(*s)-1] +} + +func (s *stageStack) pop() 
stage { + a := s.peek() + (*s) = (*s)[:len(*s)-1] + return a +} + +type infixBuilder func(a, b Evaluable) (Evaluable, error) + +func (l Language) isSymbolOperation(r rune) bool { + _, in := l.operatorSymbols[r] + return in +} + +func (l Language) isOperatorPrefix(op string) bool { + for k := range l.operators { + if strings.HasPrefix(k, op) { + return true + } + } + return false +} + +func (op *infix) initiate(name string) { + f := func(a, b interface{}) (interface{}, error) { + return nil, fmt.Errorf("invalid operation (%T) %s (%T)", a, name, b) + } + if op.arbitrary != nil { + f = op.arbitrary + } + for _, typeConvertion := range []bool{true, false} { + if op.text != nil && (!typeConvertion || op.arbitrary == nil) { + f = getStringOpFunc(op.text, f, typeConvertion) + } + if op.boolean != nil { + f = getBoolOpFunc(op.boolean, f, typeConvertion) + } + if op.number != nil { + f = getFloatOpFunc(op.number, f, typeConvertion) + } + if op.decimal != nil { + f = getDecimalOpFunc(op.decimal, f, typeConvertion) + } + } + if op.shortCircuit == nil { + op.builder = func(a, b Evaluable) (Evaluable, error) { + return func(c context.Context, x interface{}) (interface{}, error) { + a, err := a(c, x) + if err != nil { + return nil, err + } + b, err := b(c, x) + if err != nil { + return nil, err + } + return f(a, b) + }, nil + } + return + } + shortF := op.shortCircuit + op.builder = func(a, b Evaluable) (Evaluable, error) { + return func(c context.Context, x interface{}) (interface{}, error) { + a, err := a(c, x) + if err != nil { + return nil, err + } + if r, ok := shortF(a); ok { + return r, nil + } + b, err := b(c, x) + if err != nil { + return nil, err + } + return f(a, b) + }, nil + } +} + +type opFunc func(a, b interface{}) (interface{}, error) + +func getStringOpFunc(s func(a, b string) (interface{}, error), f opFunc, typeConversion bool) opFunc { + if typeConversion { + return func(a, b interface{}) (interface{}, error) { + if a != nil && b != nil { + return 
s(fmt.Sprintf("%v", a), fmt.Sprintf("%v", b)) + } + return f(a, b) + } + } + return func(a, b interface{}) (interface{}, error) { + s1, k := a.(string) + s2, l := b.(string) + if k && l { + return s(s1, s2) + } + return f(a, b) + } +} +func convertToBool(o interface{}) (bool, bool) { + if b, ok := o.(bool); ok { + return b, true + } + v := reflect.ValueOf(o) + + if v.Kind() == reflect.Func { + if vt := v.Type(); vt.NumIn() == 0 && vt.NumOut() == 1 { + retType := vt.Out(0) + + if retType.Kind() == reflect.Bool { + funcResults := v.Call([]reflect.Value{}) + v = funcResults[0] + o = v.Interface() + } + } + } + + for o != nil && v.Kind() == reflect.Ptr { + v = v.Elem() + if !v.IsValid() { + return false, false + } + o = v.Interface() + } + + if o == false || o == nil || o == "false" || o == "FALSE" { + return false, true + } + if o == true || o == "true" || o == "TRUE" { + return true, true + } + if f, ok := convertToFloat(o); ok { + return f != 0., true + } + return false, false +} +func getBoolOpFunc(o func(a, b bool) (interface{}, error), f opFunc, typeConversion bool) opFunc { + if typeConversion { + return func(a, b interface{}) (interface{}, error) { + x, k := convertToBool(a) + y, l := convertToBool(b) + if k && l { + return o(x, y) + } + return f(a, b) + } + } + return func(a, b interface{}) (interface{}, error) { + x, k := a.(bool) + y, l := b.(bool) + if k && l { + return o(x, y) + } + return f(a, b) + } +} +func convertToFloat(o interface{}) (float64, bool) { + if i, ok := o.(float64); ok { + return i, true + } + v := reflect.ValueOf(o) + for o != nil && v.Kind() == reflect.Ptr { + v = v.Elem() + if !v.IsValid() { + return 0, false + } + o = v.Interface() + } + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return float64(v.Uint()), true + case reflect.Float32, reflect.Float64: + return 
v.Float(), true + } + if s, ok := o.(string); ok { + f, err := strconv.ParseFloat(s, 64) + if err == nil { + return f, true + } + } + return 0, false +} +func getFloatOpFunc(o func(a, b float64) (interface{}, error), f opFunc, typeConversion bool) opFunc { + if typeConversion { + return func(a, b interface{}) (interface{}, error) { + x, k := convertToFloat(a) + y, l := convertToFloat(b) + if k && l { + return o(x, y) + } + + return f(a, b) + } + } + return func(a, b interface{}) (interface{}, error) { + x, k := a.(float64) + y, l := b.(float64) + if k && l { + return o(x, y) + } + + return f(a, b) + } +} +func convertToDecimal(o interface{}) (decimal.Decimal, bool) { + if i, ok := o.(decimal.Decimal); ok { + return i, true + } + if i, ok := o.(float64); ok { + return decimal.NewFromFloat(i), true + } + v := reflect.ValueOf(o) + for o != nil && v.Kind() == reflect.Ptr { + v = v.Elem() + if !v.IsValid() { + return decimal.Zero, false + } + o = v.Interface() + } + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return decimal.NewFromInt(v.Int()), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return decimal.NewFromFloat(float64(v.Uint())), true + case reflect.Float32, reflect.Float64: + return decimal.NewFromFloat(v.Float()), true + } + if s, ok := o.(string); ok { + f, err := strconv.ParseFloat(s, 64) + if err == nil { + return decimal.NewFromFloat(f), true + } + } + return decimal.Zero, false +} +func getDecimalOpFunc(o func(a, b decimal.Decimal) (interface{}, error), f opFunc, typeConversion bool) opFunc { + if typeConversion { + return func(a, b interface{}) (interface{}, error) { + x, k := convertToDecimal(a) + y, l := convertToDecimal(b) + if k && l { + return o(x, y) + } + + return f(a, b) + } + } + return func(a, b interface{}) (interface{}, error) { + x, k := a.(decimal.Decimal) + y, l := b.(decimal.Decimal) + if k && l { + return o(x, y) + } + + return f(a, b) + } 
+} + +type operator interface { + merge(operator) operator + precedence() operatorPrecedence + initiate(name string) +} + +type operatorPrecedence uint8 + +func (pre operatorPrecedence) merge(op operator) operator { + if op, ok := op.(operatorPrecedence); ok { + if op > pre { + return op + } + return pre + } + if op == nil { + return pre + } + return op.merge(pre) +} + +func (pre operatorPrecedence) precedence() operatorPrecedence { + return pre +} + +func (pre operatorPrecedence) initiate(name string) {} + +type infix struct { + operatorPrecedence + number func(a, b float64) (interface{}, error) + decimal func(a, b decimal.Decimal) (interface{}, error) + boolean func(a, b bool) (interface{}, error) + text func(a, b string) (interface{}, error) + arbitrary func(a, b interface{}) (interface{}, error) + shortCircuit func(a interface{}) (interface{}, bool) + builder infixBuilder +} + +func (op infix) merge(op2 operator) operator { + switch op2 := op2.(type) { + case *infix: + if op.number == nil { + op.number = op2.number + } + if op.decimal == nil { + op.decimal = op2.decimal + } + if op.boolean == nil { + op.boolean = op2.boolean + } + if op.text == nil { + op.text = op2.text + } + if op.arbitrary == nil { + op.arbitrary = op2.arbitrary + } + if op.shortCircuit == nil { + op.shortCircuit = op2.shortCircuit + } + } + if op2 != nil && op2.precedence() > op.operatorPrecedence { + op.operatorPrecedence = op2.precedence() + } + return &op +} + +type directInfix struct { + operatorPrecedence + infixBuilder +} + +func (op directInfix) merge(op2 operator) operator { + switch op2 := op2.(type) { + case operatorPrecedence: + op.operatorPrecedence = op2 + } + if op2 != nil && op2.precedence() > op.operatorPrecedence { + op.operatorPrecedence = op2.precedence() + } + return op +} + +type extension func(context.Context, *Parser) (Evaluable, error) + +type postfix struct { + operatorPrecedence + f func(context.Context, *Parser, Evaluable, operatorPrecedence) (Evaluable, error) +} 
+ +func (op postfix) merge(op2 operator) operator { + switch op2 := op2.(type) { + case postfix: + if op2.f != nil { + op.f = op2.f + } + } + if op2 != nil && op2.precedence() > op.operatorPrecedence { + op.operatorPrecedence = op2.precedence() + } + return op +} diff --git a/vendor/github.com/PaesslerAG/gval/parse.go b/vendor/github.com/PaesslerAG/gval/parse.go new file mode 100644 index 000000000..623a39bfa --- /dev/null +++ b/vendor/github.com/PaesslerAG/gval/parse.go @@ -0,0 +1,347 @@ +package gval + +import ( + "context" + "fmt" + "reflect" + "strconv" + "text/scanner" + + "github.com/shopspring/decimal" +) + +// ParseExpression scans an expression into an Evaluable. +func (p *Parser) ParseExpression(c context.Context) (eval Evaluable, err error) { + stack := stageStack{} + for { + eval, err = p.ParseNextExpression(c) + if err != nil { + return nil, err + } + + if stage, err := p.parseOperator(c, &stack, eval); err != nil { + return nil, err + } else if err = stack.push(stage); err != nil { + return nil, err + } + + if stack.peek().infixBuilder == nil { + return stack.pop().Evaluable, nil + } + } +} + +// ParseNextExpression scans the expression ignoring following operators +func (p *Parser) ParseNextExpression(c context.Context) (eval Evaluable, err error) { + scan := p.Scan() + ex, ok := p.prefixes[scan] + if !ok { + if scan != scanner.EOF && p.def != nil { + return p.def(c, p) + } + return nil, p.Expected("extensions") + } + return ex(c, p) +} + +// ParseSublanguage sets the next language for this parser to parse and calls +// its initialization function, usually ParseExpression. 
+func (p *Parser) ParseSublanguage(c context.Context, l Language) (Evaluable, error) { + if p.isCamouflaged() { + panic("can not ParseSublanguage() on camouflaged Parser") + } + curLang := p.Language + curWhitespace := p.scanner.Whitespace + curMode := p.scanner.Mode + curIsIdentRune := p.scanner.IsIdentRune + + p.Language = l + p.resetScannerProperties() + + defer func() { + p.Language = curLang + p.scanner.Whitespace = curWhitespace + p.scanner.Mode = curMode + p.scanner.IsIdentRune = curIsIdentRune + }() + + return p.parse(c) +} + +func (p *Parser) parse(c context.Context) (Evaluable, error) { + if p.init != nil { + return p.init(c, p) + } + + return p.ParseExpression(c) +} + +func parseString(c context.Context, p *Parser) (Evaluable, error) { + s, err := strconv.Unquote(p.TokenText()) + if err != nil { + return nil, fmt.Errorf("could not parse string: %w", err) + } + return p.Const(s), nil +} + +func parseNumber(c context.Context, p *Parser) (Evaluable, error) { + n, err := strconv.ParseFloat(p.TokenText(), 64) + if err != nil { + return nil, err + } + return p.Const(n), nil +} + +func parseDecimal(c context.Context, p *Parser) (Evaluable, error) { + n, err := strconv.ParseFloat(p.TokenText(), 64) + if err != nil { + return nil, err + } + return p.Const(decimal.NewFromFloat(n)), nil +} + +func parseParentheses(c context.Context, p *Parser) (Evaluable, error) { + eval, err := p.ParseExpression(c) + if err != nil { + return nil, err + } + switch p.Scan() { + case ')': + return eval, nil + default: + return nil, p.Expected("parentheses", ')') + } +} + +func (p *Parser) parseOperator(c context.Context, stack *stageStack, eval Evaluable) (st stage, err error) { + for { + scan := p.Scan() + op := p.TokenText() + mustOp := false + if p.isSymbolOperation(scan) { + scan = p.Peek() + for p.isSymbolOperation(scan) && p.isOperatorPrefix(op+string(scan)) { + mustOp = true + op += string(scan) + p.Next() + scan = p.Peek() + } + } else if scan != scanner.Ident { + 
p.Camouflage("operator") + return stage{Evaluable: eval}, nil + } + switch operator := p.operators[op].(type) { + case *infix: + return stage{ + Evaluable: eval, + infixBuilder: operator.builder, + operatorPrecedence: operator.operatorPrecedence, + }, nil + case directInfix: + return stage{ + Evaluable: eval, + infixBuilder: operator.infixBuilder, + operatorPrecedence: operator.operatorPrecedence, + }, nil + case postfix: + if err = stack.push(stage{ + operatorPrecedence: operator.operatorPrecedence, + Evaluable: eval, + }); err != nil { + return stage{}, err + } + eval, err = operator.f(c, p, stack.pop().Evaluable, operator.operatorPrecedence) + if err != nil { + return + } + continue + } + + if !mustOp { + p.Camouflage("operator") + return stage{Evaluable: eval}, nil + } + return stage{}, fmt.Errorf("unknown operator %s", op) + } +} + +func parseIdent(c context.Context, p *Parser) (call string, alternative func() (Evaluable, error), err error) { + token := p.TokenText() + return token, + func() (Evaluable, error) { + fullname := token + + keys := []Evaluable{p.Const(token)} + for { + scan := p.Scan() + switch scan { + case '.': + scan = p.Scan() + switch scan { + case scanner.Ident: + token = p.TokenText() + keys = append(keys, p.Const(token)) + default: + return nil, p.Expected("field", scanner.Ident) + } + case '(': + args, err := p.parseArguments(c) + if err != nil { + return nil, err + } + return p.callEvaluable(fullname, p.Var(keys...), args...), nil + case '[': + key, err := p.ParseExpression(c) + if err != nil { + return nil, err + } + switch p.Scan() { + case ']': + keys = append(keys, key) + default: + return nil, p.Expected("array key", ']') + } + default: + p.Camouflage("variable", '.', '(', '[') + return p.Var(keys...), nil + } + } + }, nil + +} + +func (p *Parser) parseArguments(c context.Context) (args []Evaluable, err error) { + if p.Scan() == ')' { + return + } + p.Camouflage("scan arguments", ')') + for { + arg, err := p.ParseExpression(c) + args 
= append(args, arg) + if err != nil { + return nil, err + } + switch p.Scan() { + case ')': + return args, nil + case ',': + default: + return nil, p.Expected("arguments", ')', ',') + } + } +} + +func inArray(a, b interface{}) (interface{}, error) { + col, ok := b.([]interface{}) + if !ok { + return nil, fmt.Errorf("expected type []interface{} for in operator but got %T", b) + } + for _, value := range col { + if reflect.DeepEqual(a, value) { + return true, nil + } + } + return false, nil +} + +func parseIf(c context.Context, p *Parser, e Evaluable) (Evaluable, error) { + a, err := p.ParseExpression(c) + if err != nil { + return nil, err + } + b := p.Const(nil) + switch p.Scan() { + case ':': + b, err = p.ParseExpression(c) + if err != nil { + return nil, err + } + case scanner.EOF: + default: + return nil, p.Expected("<> ? <> : <>", ':', scanner.EOF) + } + return func(c context.Context, v interface{}) (interface{}, error) { + x, err := e(c, v) + if err != nil { + return nil, err + } + if valX := reflect.ValueOf(x); x == nil || valX.IsZero() { + return b(c, v) + } + return a(c, v) + }, nil +} + +func parseJSONArray(c context.Context, p *Parser) (Evaluable, error) { + evals := []Evaluable{} + for { + switch p.Scan() { + default: + p.Camouflage("array", ',', ']') + eval, err := p.ParseExpression(c) + if err != nil { + return nil, err + } + evals = append(evals, eval) + case ',': + case ']': + return func(c context.Context, v interface{}) (interface{}, error) { + vs := make([]interface{}, len(evals)) + for i, e := range evals { + eval, err := e(c, v) + if err != nil { + return nil, err + } + vs[i] = eval + } + + return vs, nil + }, nil + } + } +} + +func parseJSONObject(c context.Context, p *Parser) (Evaluable, error) { + type kv struct { + key Evaluable + value Evaluable + } + evals := []kv{} + for { + switch p.Scan() { + default: + p.Camouflage("object", ',', '}') + key, err := p.ParseExpression(c) + if err != nil { + return nil, err + } + if p.Scan() != ':' { + if 
err != nil { + return nil, p.Expected("object", ':') + } + } + value, err := p.ParseExpression(c) + if err != nil { + return nil, err + } + evals = append(evals, kv{key, value}) + case ',': + case '}': + return func(c context.Context, v interface{}) (interface{}, error) { + vs := map[string]interface{}{} + for _, e := range evals { + value, err := e.value(c, v) + if err != nil { + return nil, err + } + key, err := e.key.EvalString(c, v) + if err != nil { + return nil, err + } + vs[key] = value + } + return vs, nil + }, nil + } + } +} diff --git a/vendor/github.com/PaesslerAG/gval/parser.go b/vendor/github.com/PaesslerAG/gval/parser.go new file mode 100644 index 000000000..19cbabe6f --- /dev/null +++ b/vendor/github.com/PaesslerAG/gval/parser.go @@ -0,0 +1,147 @@ +package gval + +import ( + "bytes" + "fmt" + "strings" + "text/scanner" + "unicode" +) + +// Parser parses expressions in a Language into an Evaluable +type Parser struct { + scanner scanner.Scanner + Language + lastScan rune + camouflage error +} + +func newParser(expression string, l Language) *Parser { + sc := scanner.Scanner{} + sc.Init(strings.NewReader(expression)) + sc.Error = func(*scanner.Scanner, string) {} + sc.Filename = expression + "\t" + p := &Parser{scanner: sc, Language: l} + p.resetScannerProperties() + return p +} + +func (p *Parser) resetScannerProperties() { + p.scanner.Whitespace = scanner.GoWhitespace + p.scanner.Mode = scanner.GoTokens + p.scanner.IsIdentRune = func(r rune, pos int) bool { + return unicode.IsLetter(r) || r == '_' || (pos > 0 && unicode.IsDigit(r)) + } +} + +// SetWhitespace sets the behavior of the whitespace matcher. The given +// characters must be less than or equal to 0x20 (' '). +func (p *Parser) SetWhitespace(chars ...rune) { + var mask uint64 + for _, char := range chars { + mask |= 1 << uint(char) + } + + p.scanner.Whitespace = mask +} + +// SetMode sets the tokens that the underlying scanner will match. 
+func (p *Parser) SetMode(mode uint) { + p.scanner.Mode = mode +} + +// SetIsIdentRuneFunc sets the function that matches ident characters in the +// underlying scanner. +func (p *Parser) SetIsIdentRuneFunc(fn func(ch rune, i int) bool) { + p.scanner.IsIdentRune = fn +} + +// Scan reads the next token or Unicode character from source and returns it. +// It only recognizes tokens t for which the respective Mode bit (1<<-t) is set. +// It returns scanner.EOF at the end of the source. +func (p *Parser) Scan() rune { + if p.isCamouflaged() { + p.camouflage = nil + return p.lastScan + } + p.camouflage = nil + p.lastScan = p.scanner.Scan() + return p.lastScan +} + +func (p *Parser) isCamouflaged() bool { + return p.camouflage != nil && p.camouflage != errCamouflageAfterNext +} + +// Camouflage rewind the last Scan(). The Parser holds the camouflage error until +// the next Scan() +// Do not call Rewind() on a camouflaged Parser +func (p *Parser) Camouflage(unit string, expected ...rune) { + if p.isCamouflaged() { + panic(fmt.Errorf("can only Camouflage() after Scan(): %w", p.camouflage)) + } + p.camouflage = p.Expected(unit, expected...) +} + +// Peek returns the next Unicode character in the source without advancing +// the scanner. It returns EOF if the scanner's position is at the last +// character of the source. +// Do not call Peek() on a camouflaged Parser +func (p *Parser) Peek() rune { + if p.isCamouflaged() { + panic("can not Peek() on camouflaged Parser") + } + return p.scanner.Peek() +} + +var errCamouflageAfterNext = fmt.Errorf("Camouflage() after Next()") + +// Next reads and returns the next Unicode character. +// It returns EOF at the end of the source. 
+// Do not call Next() on a camouflaged Parser +func (p *Parser) Next() rune { + if p.isCamouflaged() { + panic("can not Next() on camouflaged Parser") + } + p.camouflage = errCamouflageAfterNext + return p.scanner.Next() +} + +// TokenText returns the string corresponding to the most recently scanned token. +// Valid after calling Scan(). +func (p *Parser) TokenText() string { + return p.scanner.TokenText() +} + +// Expected returns an error signaling an unexpected Scan() result +func (p *Parser) Expected(unit string, expected ...rune) error { + return unexpectedRune{unit, expected, p.lastScan} +} + +type unexpectedRune struct { + unit string + expected []rune + got rune +} + +func (err unexpectedRune) Error() string { + exp := bytes.Buffer{} + runes := err.expected + switch len(runes) { + default: + for _, r := range runes[:len(runes)-2] { + exp.WriteString(scanner.TokenString(r)) + exp.WriteString(", ") + } + fallthrough + case 2: + exp.WriteString(scanner.TokenString(runes[len(runes)-2])) + exp.WriteString(" or ") + fallthrough + case 1: + exp.WriteString(scanner.TokenString(runes[len(runes)-1])) + case 0: + return fmt.Sprintf("unexpected %s while scanning %s", scanner.TokenString(err.got), err.unit) + } + return fmt.Sprintf("unexpected %s while scanning %s expected %s", scanner.TokenString(err.got), err.unit, exp.String()) +} diff --git a/vendor/github.com/PaesslerAG/gval/prtg-batmin-gopher.png b/vendor/github.com/PaesslerAG/gval/prtg-batmin-gopher.png new file mode 100644 index 0000000000000000000000000000000000000000..7c23b52b76e7fa16cfc1ceff7b906587880a4375 GIT binary patch literal 34696 zcmV*)KsCRKP)004R>004l5008;`004mK004C`008P>0026e000+ooVrmw00006 zVoOIv0RI600RN!9r;`8x00(qQO+^Rd2^j<&C-W^TrvLze07*naRCwC#oppR%*O~Xf z=i=~)Sq7PzlGusENh#%q+N4csOIzA4?iLrzZ+T0*ZMWUDfu?P+O~Xzcjv;2YELn`h zaN)dv+!?abOVWL(&O!0EY{&4xqd6|IAp8Yi(^i zRrft%oNQZOD2S6k#VZ$M3>XuSAk%<3z&r$VfN4moff6JEDWxtYij)%Rg6m4Wza>%v zvhW*_NI-ZGOMnmpQT(kGe~aPoZ6U}42?TvW8*mWGZeR~^0FP90KpuSqCmOtC+_%6Z 
zd=w}tydroLfdMFhgGcye;o1Xuq)q_2(N22066p+aCV(*pOb}=U76U60ECc2ssg{zk zbfs_{7uS|Jwu@uCxQ>hK$O2g?3|#?20R%!QfC55z?-3N~18#v-?$F;LLC6A`Kv4yX zs-UV0il(4y3aTm)sz6ah9_UBV0&GXJ5!eju1$yxY_YjH$ca(cKfI46vuoPGb%mixj zNE0Z0rzkN+;Z9w^#v@7!hKRin*j9M$19}SIJ*F+E1I`fV1Q=z&sCcT@GGHx&)kx+^ zNu_k9=GZQ_ zEF}^bTo);Aq-*0kd0Z!lYiDq*433rZUUnMS%HTRVkQNAuqJXL^Xu67~tEhT`P>M(t zh_o5W6Ts7Wk|}Z2y$69sz=eg^5cpcyK<2dAU7U^_`L?T5zktCT&k@{%6Bwq3 z?$gk81w|DpJR?LU6v^VEC<>8Ch{lFm=FXkT{P|7HnmLWy+A7M*q6C9}Ow&M9RgVY) zkP^pnv2BNJHb)|nqPK5=*2A6b+jodvyZ5tq-yu3Wx=E+AfJ9N0qC~QQ?Z9ooAMhk1 z@Zu2Y6;GT|q)R!TWO*xav6M`6ELX|rZ1NdTs(^-OmS6^JFvATPf$9QLOoUJhgLG)| za{57d>n4drx(=?DBA@9amux4OY{Sa*;@W8xRic|3hEGG$K+{wztIAlmbUx>uw~AFO z7cp~s1JRN&ilz;b13B#e<>=QR`YpuJaaJ}@Z{GlW_B8YKGtcwnQ=8bedq44b%3He~ zkwgc8n}FZot<8WJmO!sq;v7X71(*-K3Bg;DEOl(xm(M!nGZt3f1z}+LD)2=b@kOSg z`zulPK!GTdW0d)q6bb_viF7RNT#Q_@olLBmbi55yw^(u3e6GCeaxS}k4NWr}G5v*Q zjq6VBy(NptgIY}-LqhX}M6_yrz;df~9E~2cmq+l0wPxb=iY=!_u0oUblTPOEF@Hl_^)8E*-Z4Z`Z7YXFzt%1G)+=pke zJLLp=#SrHx!UTa!fcF5GxsEK!Wo&Y(JhmlK^a%dQbONPwFhg}Hn!iAn7ihq|h;Utr zX{wxeRt?u(IfvC})!+;0;7qY(nJ5a9sIRA=zuvo!8-ITXJ9g~Dbv?rb-hR;k0N=&i zqdKMJdD#(DHS9z;z{&u))=DoW%`qpGR$N@bn|h z(NF|P*t~fsKl-oV^58>HVml6sXLx7Qe4f8SJO^)S;GX=tc z4}VD$p^C+rp*n z(_fT63E-Q+_ki?NljmhcoTJpTCg3B$yIeRT0 z(Ma*_8a%7YC-YgSHaVCjmvYbp4U{%sOtkiFbbt9PmM{RWl!O90AOE{0eCR!k2>A7v z_#V_SNJ&{mDQnMLMQ`r_yLRnIN)#bjf#-PoSTq{RPX&2i(!?1iWy*k00{_pk<=N?2 zUQNYw2(^Tgy7MS)I3FWaQ*hmQnGBLiKuSqCXz2q-RQ40cd!V$39u|yd;S;ij?^wk`LrEb~G`N#j;d_6^kjG zb{YQCCWNB%N+t|=qzMLeKJ)j>dHdVupcbrBQ-=aMltL7Nbiv6XtrXm6kKj=kU+ip~OJz%$WkM4k+JUUJ5PVNxatybt)C zW4jIMc%EF^Mi10dQnvMoo^}(Hj2VP z{~%xZ(s#M*?gxv$ZvA-U<_?(rK94pfPv!-}Fw)Gzvy@z$%h*OLmP5Ka;p(%A)T}}^ zB3{RzAr0V7=GR?0hj(2&Z)!**1fW1Zj@!Q-r~NUUu4j?41IYYf!GDcWr)7Ec8v&>W z!e4<>x&XEIJe1n=P$DxyH46lqWP@Fo(y|i1{Kb#b+dIG$Pi{n2l`=f{oPBuS)|2ak zlkCD53h zZ9(W^lu$iPcC9KURkf8=mX)&Z(Pzl#Erbwr@WQsAibf+$biH$06K9xeRssAw@Xxu7 zU6Sn2AZ37(h6{++twA*cXVkKzASHx?I$!*URh+%5dh+`;LIG*xv_44w*?+~}b1O2_ 
z145oUePt*^fsKqe;~ahvnK*<9Peb@CC*2{lW=$s+OR!<%HiQ?Kz6dX>wBuz%oMEJy z4g6S2Zpb9_dTKC-=BuH6#;XaI%`e2|pTW_PQgZEEX7jGM&qtl?02eAU-G#OJ``DYl zkBlEWwS*b94S;Lm_V2`Le+;4dP)g>2I&tZvXe#sP&E(nVw$j$#jiQJMURIvJOw>eu zS`ud%Y0d%u+jZq-saQ^A5;lR-d6dt1HAb*z!X5WYf$K`B zw|~d!V?q*mc@uiKA_w=NRxCmI%O^G#q@;0L9fuCJv2FWa6h-Jj6>vv18p)2AJf{_L zhFQ+82L8*=yE7C0SscfpwEhAjwdbHH24^JA*l--l>Xp@e;)6>ujmfC7g08{8ap=I; z2z9rho{n8DK*kT@c5g@c&qgUNn-FbJv5TYK~32c4I5vM>J1N9W%$8b!=V-$kyJtbs*qhg#r4W%YH{`pi&Dyz!r?CfE~#_cGI zP=Qk5&*OCjJuNms3#3u;c67g)&pM@v{tSvSrWU2$4eBrv2xKRPtQ?tq4u@af;BKGmWQ1ZWTL7f&I_kAmx!qr#3j=S!D zi0+;~gb-`+k}Tdmp5tG1;tV6r-vQsu7yQjsqmuF&uf`15omtO}6Nawvme+$Bp(19o>@D^ky0Q5D=ky)VlLg>n}zrU*wsIRkPrNGYX2n zYnfi${v9}N>u}m1Lnd3t^fDL~3fRw2Cv)pr$dsqH=hqz~@r4J8{^a$D@HjVl9hdp@ zo4EASbNT&EccQAQA9x4wH-=>m9Sbk2sy0kLdmr$ve8w(G3}n&FYATvuiy5jtLz;2I zbtQ9WmGJSuTjGTZj2i;mO|j?jx9CqkJw%!!q<~~V#b-$Z83z%bhQ9DU7^^;qHv4TT zrE?KR*DNT>Yk5TzXOCv1-+EA!zHpmDZk{jwA{^3XbBE4TY(!ek*}hUq+pA1;ST$ zI*Uv$WQ|cOiAN{=ptbcq1;)wO> zPw{xdKHhOJ__q5n(p(ID-^x4H$$<>2QBL`c*PJ2EBv4e-=FS@b;ut`>7Og!uk;%0o zMi~lHx|9T$v3SNes42Sw1@+YAXOJ!k6?MkzF)sW8O7+Mt zTh~nX*6kA**^;$uS5aCTMSA0I1zx_2mqym?lISFN8K#JP>@wC7Z*QK@S=*3d+-ZJ zoC1TRz&E5TFG>z(v2Bgg#*6Sro6eACl5kx~RfV6~Gb0mP+oZAw=!-vojI@!s_>5}i z)_;befA$MYnxRHGh>}_8^Zpa((8`IA4aX&W^ETYR^!Ui52$nBffN2`VPnO}8VI2FA z7or~uq|t%TAh|jf&y&wcqP1%XmM`GU=H}!fB`lm%N@cl!LhGB}_@m_P9*&+^6`?b| z>b-;mE9vctk;~iTQ+cKuj*GTr4x!Ioh%hI*-Kbz~>co2bFynF!cO|oDPanR{nT}VI z@it3DZzHXL%L6IbH*)z_t_g#UDRr zCYREoJbWItYWHg$q{$-*9YqdTKC>W|VoM z0s2Hc;tY<4X(-H@7NM-vH=*NVscqO!oZ-@5N|*BRc|=1ij;w23Nli_Z_ugCv7u%-$EF|m`={!D3#2iC z&r8Y0saPJ@H7IGg5Is;$YmTNHD?hhoqdKhQvjc#Ftav{X=?a<aU}XV z^=ON$nAloJVsGmn-#P~6QbQ2W3DGGmQv*oDKP;lzpXLK3GSZT!F&Tvskk#d0Wm zl#=@MQM5vq#xpo6_zZ=abzu}C&@|yC85|FUz_JIh-6X?DlA`EDgGTD#_q584#rh_o?rr9@H1geH2+j^nz+2a-fn!UWCfjC)dZ zTox>-4oD`Uth|9e)X-~@Q!ma4UNO?1N^@q-N%PM`G4Gd?-p`7d+O9T7nF(sN@ZX| zhsmssOpi}hW2ArhagUg?2+(2P0sklqDw?WCiPo+`s4t@$EH5I4OPgYMx~`m1*SgX| z4xh25L|3DD0gw}fD=8@r@cB<)L1jglpZ#(j$yAOFn+~ySPbaUrY!UCj;d~Y?tUq!C 
zlfQdJKp!S?#=`^wjzn5+;rqwGzT-H!a_HPP$FW3AwW@XraD6sqW9K1Kb2fUQ;tXk~ z2Cs|tRK0+PuTwmqcACOA&+fvJ=@ zZwD^SB=R79L~74IV?~=njJ3A0@(#J2Gj2K53+8bwX}0YW zi;Z*8ONp+PqHED11A!0%%N{(&ckQI$xXhec#W(-sDz1FHx*EY}3{Sh|c@t#v?NhsP>AF-_ zhWYA0zmX5V_d-llACf+b;uX=|ytS3jfB7~(`^7(S=uqGBuP+2nOANPnBE$i0P8q^? z0^_j|q%&DsTic6Guz`ceD>!wsVnG4kCtW!`lgOj{t2~3i8B7_>aqQdlbZ2nJ>5ru- zK0>}ZBR+H742jIHsc*b1359(8!^bY?^Z)Q#q7i>F-o@`ig?Kta@D1tgm-~wzjUK;UoqeIPR$@+b0ESvk-WDE@NX`l2Fw$uadwSgTR#F zMA8PlNxG*uLwdZMd_o}-TwF+Af8;`g$ql$}=9Juop{v~R&hz;9FTaJl+7cYsMM{aP z2;P3>*?jXquA;ua^tf7z%vj_fZJB^3BT-8H=*!2E07eK7HXo+1Z?I_0I1C)OzTwG8 z90ho{>&k{qGLIgpAy_*13}>bgK(2;%V^y}j9SC#Q(`7h>v_$imuJ2!(ihJE`o! zQ?oWLIEcRCie-G~Kd)x~oEl71=OgdGm@j?)4V0IMjvw}|3Y@+DKsMGu~_q)idVBW;Sv}7o=8v^pHWLRu#8NutuPJ+IlGUZ*u#YV3r^il6&ysr_w{Spf3SzwT)qh1&`vzC zC1mf|joX!A{KOHMm(4_!_>P}PTR=9M;n9aT;MgvTp-A9K;J9}0$xbfk z_gCRR6K6A}7>~3mLQ2WOgK_Nq`1Ux2Vo({qc%-@pbPfECO#U#ZG}^>*S+KB?<4-kv8CQC7HC?yKiuunvBHIDyS*F;u!avw!S|TOFi!ug*qJ|K?HOH zA3lrH+pi_?;Z=yJ$??c7{09DaEky)Q?AMmdSUmPj9AC*i?7WMWb5sQH04k0C4;${FTN4JImWjvOc1xwD^a+8*~ZpsMuMBcu)pa#lC{JN}Ej)y-)W ztt$anqRp!y`u*2Z`n$JbTr{I#W7AW$~y5Ain)~DdNq-6T#7oQgd@^NA|gJ5A3FyTIdQdZ=}PW> zxRrsyEQY^~K*=ofSsUAO4Lsk=F|&hARN|<>RgNu#xr~KBG7a5-CTw7;Fj;9+L_Y7Z zd2XX`=9CfiPd2_#5Rd1XJ}pdXxqn=bMN$3ujXI>XnO*k})Rw&g zRe6czypy2ThA>|<8+}0q!cYl){(RJ`6W0im2yVJ5keSvQsyP}OdW|ZReV=1#C*eCFZSj{`$CGKwI#uy|9Y_CeCnyBLNy7L&S4;N z2s`Is1hh9G`4P~6%qb?&L?41TI+m+gITwFP6N>f{nVDV!nC#l-Xef$c_wE7q?u$(z z-bEB*bWT&%>_jLS54?c26T@{Sx7@yu=9c(SypVSui9a&kGZJKMkSt=1S~=r-OoapI zAz7Wz+6cw;N^`u>Nw{C8mQhkG8RZ?0cRbpuAPuL zL%Gr1H-V+ zbVwiCi`CvY>1x?0D{X`jWHTuqcz5INFp~z5tKX^$CAA0yOp^(9n zrRA@fM~8r1-r=Xe+QvN(92#kQJ}xAh8NmotVxJSvbwlCNl4<#@ zjjD$+UTK**B_tBf~uFt(LH>bsCmTm0p(&FtAd@XFXm5MbGoU){Kq|NFxp+zA?hg`#7I8*priV@nX6 z$FS-J<9TeWU03?;yn_*}Le*YbaW~S=lF{%c`j;-fOJ2%Bby3IJr3aOfR`q z2bp9W>0}$})M3(@HY}@y&hBP9J6q`Q>m;3s;aFbGyQ-?Fs){&{IFiz3>%pCL2g~4@ z`(VL2P*RCwTSWJ5V0y!>WA8=?GFhAE<~XG#CY4o_NVI;sLlI=M4*&g|9sKM!JILjo 
zNzV~NQOOMKMYD$N+&3jHkJMW)uk0wme&x9lwlaA7e?%QiT%>a1U;&NSNau2tOOo}6B`F+A*r+T4yqnT_m#Y|);EBnnyBd%G|YxKeE|Oa zQ`oZ+<}4>=IP|qYPA;I6D=)*IKNGiM7F0Dtqyqdw(2S8u&5!QB!kD#OiRD-%GbuV_ zz0lo)+P;s_o*h(X6Ep@xOsi?6va+5)FidmHUV1AUp}Yn(4et0UxGr3DC3Zm_9Ba%7 zAxNcc{_@vm1_p9maA6((fIg0FFsCt8!T$X*zVlz3dHC@*q?`h6RM9Yk)no?u;Mx+! z6f2OZfHNj>3WkA2(v<-_@1O_EUV%=X5=9T-A8^QYx5D%VuxK@GehgfP{Hz7^&RhgD z7D8z?7(Rw9j&k^GoQc8}6+%%6mq5cDoRyan%VmiTbh58y7v_PjRPBF|>4wgMo(}BS zz7Gbvp}z}iX28{-1jiw;^HCJ#_zsUkKtAvC=woekcc;1h@@dr6248v;qatu@mxtCJ z<_ACD!kz5BiM`8AID60WgE0Duw!ls+iP$XLsx&hTQpk^kyRj;CTu!q+D&%@5A zVbeNKbu6ibcYg)yW}@Ep3#OG-p{VNc1mdILTVZhR*x65acZLfutY_)63Jh}`uK6!E z3e^reJJbB;=3V^Zj{U@w`3abu#{ntP%~BNAkDbp!Kx+W%IRSC}2$X0c z4<%T>yppq4RZ&^#AL^4A6$J@nBrBu1GZ%qv zLo((K9^Ld7{HKHWL~$^ON?`Ufn6ZeyzAj>WHc%0Y5(q{n+(sb;`MgU@OPt+%21z6> zLP4ECP(z$dCAG;y;7OQ_!_&`o@uUCQ%Flkioo&1Naa?%{q|DLRSFo~uWaBNE0UbpX zt-zhflQ`3*BKnh`=#UxwAygaFqipXwwvu!n5y5IH+VUJ*z|K{W3`9@yqt#dH(G zrBFW$7Mu<9&+)8hsya1mn~Ant9FdN=4mb`}HIjYPN7%IOaoV4`n~SH-V)pFC2%$_! zbrXfOxB~+@9)0vMn>Tecdrk?97L_xtF-#z+9l7?Y7!txO10o$sY%s^x?S1^=?gOlU zrh`P%LQ%xnVF6R!6hJpiK`OYm3x+ZS2#iXcVqy*{rRq8^rWr*jQ&yn*^negpnLZL- z8%g!=#*G*T1xwC@b6*eB=6hwyPqjKz zyhb_>J#F5;O?RtjSxd#i%6TOB1?kGW-1W#^tR3iK$)a-z1jA6cmkD$cAc!X{Hf-o- z>()N1D*ZIgiqg~+rKUPSD5Rqla~sIQkBM}H;E0t%f^9hr#`3haCE33(#xt9`c!vo!EqP1&@)~|i}M0`p~YG5~m zZI5GT`_WAmUr zwFeXhhT|;>C;)gdNuES=M9{7)>%ZOQ4GUC)3i|?F8llYdG^_7 z_~RdMW8J!S#1kpfd5vUR;+EIL+dl=1*G`ezCSl{laOW=&U579YLBJ=`6&qbs2!(=_ zmse0(QAt^O8KF=JfJ7oeS63J9?d|mS_mR&#*fwMnS**N6P3?3pxbRKPm@%~_J!Cvt;hR{p z^xR_-ufNbJR?ptk5@%m)eA3>`rxJY!|3+qTD<#!I6h(Y(RC9DR8mR|9l+V~9$1w?4 zEiYKrUZN0!^uQhl4nKmXNkXM2W#uLO<3E0icfIQdB9RDwzkl4M0njv!*|XVYZet175IEU&zo8Zgb*l-g3srpva*t;OPBGQ*S?ko3l`GR*+Fk#7rLR5PGvaM zyc@&tQByl(-1mJ79u0$uJezj+aj-o(*Sn4H7Q(QBoS=AOG=FTzTbHn5IcE7(`VkSJomDiE!R|Yw7Ch zWZ#}0peW>e`#cNR!qrSTQcVbN;eXS&QTh&38isH{*s+ayj|z39@s~sdlUYkjxVHh{q^tU zJ@0uhx~`uTX^Qaqe5_cplINd)p3e3|*shA*-s*W&*Up@nifiTI{u^QQqlAMH^0{1n z^|gHB6Q3j$3SpWCfk5DxK(+B86bi9+?OJqQ=lLz00hd%VO>D4_S+kd57%z-gAp~S{ 
z4m%DEvg6<&+1%-;nvHC)z{>WM8QhN<&`>p{f2`h*NQow0iJ}^a)1}DJBp_Xz^x$3u z4u)T)v9XaWue=If*YW%PCrz3ns;jH{)Tcg8Wo0=bpNo=9d6uwLZ2V>+*MY;kVe_Nt zs>H9`EL^yV_rL$|@cDcghJoMjKPk1!=kxKAkNiFFde;pYK9xYwq_gu7`}b{qLDV$? z^bY2DdTS4RTjK>w+9|*6NkdUhJgck(C>e`5A&{z+peW-#TM8P}2u~Ob9r_W^d>NV)Pj?XvsYKf|6rc&neW-Lvj!fY(v4u&>UvWoXPp;Ux1Vnzu!M8^=vHE)zxw7rI(_p0>jYg z?mkQ+F>p#INg*JWvDvh%kLUOFlg>^)ocYCqP;`p?lEH!AQ$n21-V9G}>GrH< z@};4gjf7XJP6(x_s+nUE$1H9{d+VE*+?^Kg*Tu>WdgD}qs>({r%ge{y-2BU5?&13D z-@y$xypzv;?lZ(+4Yz1zkUJ_2kj;x4-?3uYK)5=nhI+4;PolY8h95`@b$fIB~2#3Se)z`Cm%XSn2PBzWZRHUe&6$~zp z?Wt%|LN@J5Cm~RTBoGJ?4o4*<9TR`JbM-+a)q{F3!SE8?I=*VQU$4y{ST3X6yKJ!^z*F7l< zmaglRmscQ!L=h54QN1)yGZ%UpK}#w<0;OZn-F74=T6_S3i@>v@`Tf30w?kD`uD|Y` zw6wI4$z-_t>T6iBV)=;gxUPd^+e7b9lo{tZ-AsQx&$fL7boFP3K6`m##3bx$z;fVg zK%+!@vrb`MBj6?N5iNv@qWO@n=dn95&`&m-9W|@}a7=~ScgPARU8re-_kIPumWn}d8{GeU*!e61iD?)^PMed(>NRWl?Qd_yaU4oZOON?~K5voB<%)}0 zblsRxdV%8JOiM?K9R~(UW$c$BVLZrUQDIMGb2Q*xz)Xo`T0&u2ri494lD)nxu8_#_ z<^($(Aq1Kk9m?L+)zw8`Umw-gr#2(0ZQJzq^&(|qr@5pG%4$%azK7tRM=5cWr~;Bo zfWex@*ehP`k*cI}NQ)K)m2$EHXGI)O-HgRzB$LU4VNc-q2N~~*LIH$+ucL7SaBRDi>mowa?(d2F#MH( zf@4d1dwbctcQ5ni&6_e;o6*qK)y1w|yKqr(q=1@fh@DR|`{`e>WL7OrGpC~)I@xTN zgUwIzz`c7&T=8*GwIPKdpU-347LIf3Ca!(^_7aQ5aBPXHYDA*tjMu8xGmvHLz5#k; zxe?LNFHZ_uoT$U=OZqzog95 zQcCndC8{38%GzWyS=O(AjC4AE(lhz-@c85FX>DyKZ>vbvfLTkC@qV0K21VDAih}9$ zQ&L(+Nl6H;bFY`;DO7?)K_Z9^4l+16h~qdXOe{DVxUS2ik3LE+o5RZ6ghElu%Bqjs zFG7IhNcOeHd1`A9y@R=ziN);1kgkQd5h?)E3J?bHxe8%vAb}t{Rrr}sl()+{{8KE- zCHtR2J#Y`{f!#PxAt}UZ-)$1jjADlBu=5VCEm^<*G4}4=i#=>nJe5c!68!Oxe^dX&DyT+;U#Z(6gEmIDXay=OnY{R2F|bvwOnhxzB%FJbkT|ASaJ=q8q{nYP)mtB>dR4v@*c^xS493jw%} z1#-wzm{gKXEsWtC}dP>4hAe zrkyAh)$^4a*pC2mRvPe}J$*jkl)ISRdFLJc_P75_GAl^uRYXY{uL^YVwd*hB(lsk- zs;i=_vy+~V4rWzGdH-8p#rZ22vAjOa#`TZW-J3)p(Um;maF`1&xDX)(x~@-ITV2=X z7r*#94?g$+*_4fRRn9%{byQZ>k4c=)-V7Ue_0ie)a1ks0{H4P=vt(1Ql|+;bNzR;=KfYp$JA59yYc7Jl)IpOebuNo93VRA%Xe zeBuokQd3!h0&GVA&3{XEgBcOBV9Q-G|BBDz>Zg}`p^$k}FIT)1`? 
zkO(hXNuL^xMm{PajO%hyx`IZ+#|(1=l>pwt)xsr&$y`cV?Ub-Q<4`x)fi|$0Tznsy zR6Du+Ahwgkl}=$FsNm2jgy)GMl#xmG#PDrOiIftFiz{8EbZ}hK-}ALw{vl%l$?9+xd`78?VNS(G!Ia$aLO#H}oyi4=!vrd`=h zmns?16jHiQRx_|v16S3Nii#}y{vZ+3MYuMqYooX}n(LstE~@liij>|9iI7L0o)i)n zfhz=#P_PvRTT!u99a}YUG#^qAfF45VLCi=avD9JWS&g=iPX6UzzKUg8yzz~1I;p*z zd_K?5fBrLWzx_{SEtOPS!i;(NDx%D;k9v!H2`wGn-1FE5Htg>L*JbIf8kWwgXJJz# zpAG=$#fmWZz$Yz&)v+QzksW+z8cH2PD(;%G8w-A{qOPUqwC068kwBK z(z(lNnzeYy08wdXbD{B_-a)E z4HK0Fd1B zeu0Ar5Av>ey^HeliV62=JRav4zxX*n|M|~IW*lNE1vgp=mtRYEpo9B2K1e!~qop^_ zwu7C#`+e_c(Ir7zT3WdLk_-9HcfZfK{<@0=wPAw7;y`2$TyZw;&EFxOPM|6oe)OXs zU|D&tzWSO;??DX?4)UWP{g4}P{68{zh^GZ88Z%}tz%YEcuD96F<{Wk(igBPVfpt<9 z&0ao`(!sG(2t^=-aDX-qcoujkLQ!|?2{JhgKl*W3B`SgvOaf@c$dkmvDeOSV5#Lh@ zq7uX$F=LJPEaY+Dk7tb}hW#9Q9Vx-J9lq~tVee(v!1kv}Z+;Bd)o^8;?|tvPtXua8 z*Ijo#7hZS~RaI5TRf$~JrMI`2O`A4x(@no)-MU9eWnBi73bql1H+>LhFULtt>zj1-@e_xtkeA(tih@E_<;@C4jpP{`}S>Y*|LS^=4R5FELmG4krFt* zFudv=;8?h^He4w=csRk1gE7+Cm#^E*l8{mx}fU{oZ89!73fnr#K z0K-RO-dcWo%iUbQat^({eSGI9zhqC9bRqvz4Mh;W8HTn0|Q-{rcTz%GVtWH zJoWT57`lefXJVROUNtEt`Fx&SE=w*qvYnZnM&1Idr^BnSgLx}q)5Em2HIsL%X&=;a z+%r<<7yxELnZ5{tW2cZ#7FAO~(2t~B1LD8~z{QTH@R-@n8|)_h1uNRP&ky)e@uA`% zht?W-oWh#M#f4#Hnw%6GrS|L??Gd;*?4Xx>LJ8K-okf2>;tgu)dfv%P&xgjjuz4NU z_NTD=yHH$-rYY!}KvgV+u!hd*x&qtPux*LsfGbppl*8)t;Nq*GeojGRjG}l70SnLN z9>?aXdmjWl&)}T3IIAun-Ib|Wi|3N7p=mL4-5<$rdy;ITA5|!5nm|(pYAS~!atPsF z)0G0(1;0UsJd72I$G(~^O>{UD?C+E2cl5N)ylwQw|n<~-Ww3l-H-qA`zXx^ z36{-6Ce3=zrYQCj&)ZI~29qPMhb|;<@B% zNDM-}&ueEU=@Cm&ym0Jb#3OgO4E&*y1%8Xttyn_dbvz%ri>@NQXbrgsZXvqu8A?kR zqxnkCNFV89W%^MRP*eeuJ%B9c9LNE`0j>~22|O0+XJf95b7kF0OPx{MGDYVpY;yvj zA#jo0ArA2MMJ2Sp_9I@!G`CQ(Ob$P47*xYsPe{MlvY>?0VmjEuK|>q1NA{kE2I32e zA~{vvtHo1R?X~CVnMhI?@N(<|zeD#jtg2giJfRZ~!euz}sJt!*Pj%EQ3@0*LzrHwN}GNcJU@b3g)jh<*I)sw!IFRwT{v^~M;L z!eo(DMUn^#(~DPzakW#jU@SlzOS&PKfR$K*g!kq zS`en|iuXgXwZn4!mm>4*N7SlK2@L@PxH18z1VF%&LWv9MPry=s`ZjcU| zW&-R4Vla$21tWnCd=EHBz$G!i!VjXm3B)wkx(%l#c@zX&=;g5`3!!$_=%gtm{1aC8 zs7x;6h(&4&)sdjC&=fV89?}Wx3PXuxM(e&|j^ue)GMIB|Pg%4kZQ}V;Me4#j&9=M@ 
zqKQ5Tmmfr9Z*?O;p5VV0Lv?V1ocxCaVsUl%3(bH0AkZ4-oDN; z-*KU=mRwC8c6I_)7hV<+ICcgr(~G97D2m7c8;k!P>#1x6J_a-^2s#3iFKb))3C}Y? zddRGNIzS>B&|G97%%}uV1lrd1eC)~XoSXH7YcWD!pQ6RSDqvxm$(m|ErG_%xCO5JB zDG)5ADJ(8Exu`BceQ0XpaE|UdVdW{)>ItmBAG%u@bseLsxS=r8$?=L>=M-w<*!cvy zsez!4f<+BRAx`lhc-{mb0CuU0!k{7ezBtII)r~w&yLax>pd$r>sI7Q=8Dq3Z$)>1s zbfQjSBXKx}Thu1q2AEfBvb5ZXp@=DwUq#}TnJVYj_?c5;Ol@zwE+U^p(?ckBVIOKV ziQ+ma=@deJSEraBkD1Xa ztmh+L$fk)6?jW`D9B7<>Y(EGA@ji^s4m2}zdYIu~AV}9DmpF{72~=-=^8_$7>yD{F z4Z_Beo{q4z~4j|(iNf~7mQddV)1o;8I6<#il*+lSfI zGzWUxM*hFJA2LaVbcH2TC}~E6#9RL?FZWU5pMr890!SxtQxwvreif5V*mw_(~6?HgG%V6U}p7UgB zHA4f%41hNA*<1h%%T1<-^l_4Ac)0i|uM_m2p$dGeLR-pa@)m0WP97fkJp@AVns-BF zlmI@Vz(5ziJAO@tZzhT!nE0h$q7XtM6FWewZg z^EsSc7CV>4%4TtL8SH${``of{9UIqmj+o4a@Xo2Ig={?vAw#7fQfCWk(u+bSAH?^^Hrsa|3p+n_$#L2yqK=XFPty@_0hIU)bN- zk6@E-s;^DQGi2i}n4vn562n)DGvji8W;{t>_jW$)E+B|8l%nHQbhp4$(Y(+Fn{*-m z=Y~W%U3dSEr>+XZ?3JD#KYVB0bzu5JSo>yp`X0FKS}!)ov5(Wz;SvE)wX-eAWr#W! znM19_;xVk`Aa*K-n@&P5jbi6eojj^^P^F8IE{ZErg_oOJ5l6fRTqL&R;s7pQ{sgIL zxT=Punn=SBz7T34iV-d$D1wkRJqK0O?>UVY@AG&*bczBypM|{}yWPt~d?u79w%vmJ&X}aeFO9w(@RwEP}6&YTnyq$fQOX&Ok;nX>4y zH3s!C>2MWJX`M%gXgT=&h1jDak%U(X?&-h4s!O3}=14y^@T7Alfw=$ols)qdr4_3R zwJ%>m#1VpAvJEF6Cs1a9U_X-QhOPO5@e>E&A)$y~)2~-%lHKIfJ(%H!p$$YuXqdHY zxYJojv}-@FlQRp+I6SJTh2$G*fk?%_i~m=SqEsK`A&z%w{QM6_jl|kebL*YqLCvW&C+wTUv?4v?KhtE&`ocUxzelG+0%;Fb%4lV zD>ZI{nIVnYH6_emR>#a~XVFk!Pf1A>gdi4=V_6m}R;)x(6n5_1$)ErHr$X*$8`pKw zG>sc>csG@WL>s$y@8*$pk06AC>$)^FG_ZWd%AtJLZEbD*?xvf_=ktY9hB~@#Fn|7h zKKz0AfrNZMM=Ta&uz!H=o?e<;53}RIUbgOC&+b8+cCD0DB5Xf zg*9El4M`~8gwnhn{l4E*+S*S^T+zcy|qxv{a{ zhu{x(&V9pRcbZ7;8cOOfU}VP%EIUPX@7;VUSwl795g}@Fj0=lle;rZpMh3Sj2n-Z_ zX!r}6cGMByANk!F8E#h&aLWuIJI}v@%);}$z%E6DbOJW7gN_4m;aea+0P#Mey$6^j z;w-NVvufT<7S5YPLqh}MNQ7v#q)@#~@X$jK@z6sLVp$eG(`3%vdA#+lZ$(N;OG^v8 zckd#b&0<*=wryis7D^%fx|j>vb>&c@E5k7G`Mi2Kp->3Z^c4~iYUsL-lwPUDva(W^ zE{xNrlO*P>Z)q0s;ihkcP_bHjy-$#^X&86d2&}Xn+ILm%I4tsqVUu` zu=+JHXO*Ys<9W_n^sUNsGfHMj>UC zG6q?W?E>#xhZ$tO=wyAVLQ7eMc0XnO`GkX@HaG1eZ 
zjDrUcvTN5aQmGUl_`nCLtgHlJ=gu9pw6tQ|7K4L>^!E1B*VjiTlOdH#($&??S!bO^ zcXt>4{R6!AwXenJ^I;e!x~`!pO2G>57KoK6m&=h(XL#a?$FXgT*|X=++uKWfdmA%m z%)l@V0)ZeUB_&i>S5sSCi>B$M(<##FH2wYk3=R(RhBv&C>C>kV^~2ymKW(k8JhO2N zcRs#}_0MdkcQ6elRhZ~#mW!Fr6V%G_MW&(X{#Vp~&B#Hckm%aTK+6L}%6%9C?Kvc` zX>D!mKl-Z^-4u902wF_PJ|j8UM>cVoK-s)wI$Ej*xiK;ujwFaD9aIF`P;NC9z3{hM z$fb5PQO5IFi}I?GQm&JwT-k4t?a%WYv5)VrP+3?PFMs` zSHJotpZLTlIdrI*TW-09cf8}ead)tk64Nxe{`z;abLUQe`OE*|lb`xDnN*6t-afj! zyXfrfWcTjf#N$a^*Cr4MP*YPwQ&SVERFZ*#0iw|;ilXrN6HoBmbI-AA)hfREnGe$0 z)y@Cib_aLd{RpO5NFX{L5j4H^&A2y%1!IP3Oa$XiAaT?`hifHAJU=`&T#kA2k>?ru zDUN(DogC?bJ*b*M^VGDvf&S6XGH&7&tZRpW`*ol80ionb_wUAEGRsRfJM`cke5r04 z@ixqcuVK`Ju|O+G5B(_P!SJ1oi4tWj6g|2%G)gd7EK9gOJ*|boLR%}%%QvQX&kGbp6(beEr-~@e?RNjuP2+$5{X2ZF=HkT z4GpYXbr$>g?_=G%b*R3qNgO+kbn>{4g_QQt_lBZhM`^ov>5-|J zy;R8x(y_=TI|zhz6h-s`ca7D`iAo#?xJ}i>b*5hrX5uYanO=-wb)m5WD<5NCBF0J< z5W*aiD8mUfyz7pih;VA*BAG^*NDlTM*w4Lxxrh4t2F^bFZ2W#7yLa#A*=L_+$Byl! z(`oAK>RGgC5pR9#TdA(DAruM?S;8{81GIGg3Qes-b%WR~_rm5Mq0asSimH*zk2vhc}gNCwKc0)x^yYvRfFsA@292Z5W9Ep=JChZlSm|pMoO78 zdk)pr)x=^k`Ui7`Vqp`@W#rl!`VQVpF4cNusuM?^s5oXLccgS0;omr#>6GEpwGoSe!nM|4u8#eOwZ+r{OwwXO^Hm`gA z8)=#`ozl`$e7<7~*t*i9tN(VY%ieq&g&KAg7OD82Qgh_H{+(lWMf+lrKuKro2rLae)+=|LZ% zpr66P1dlwriSy4}j%n18$+i*>tUB^QHI1sODypigIP0vlky6sr+s5WCx6ync&r?r5 z&7c1GN7_4+m}L{(^c8}9x|@9JFyT@iU008KmdA#tADbp}VniJxnkf7wgtrH#s3@v3 zR7HH~byN-pQP_+=x=}-!lL`}uZQEos8C)+tSRS|;FBiwCn2h670?Eyqp6=l>>8x`&|w-Im!Zvh7wo?k;$c)xFO(sasH#pXy_tMuI{m)u zQR6D*6(tyJuf+dOi}WwnVejcgUp0fu%1UCfIN59#pWjc(^jdmo!`gg^h+**NH(o${ zM?bxNgXGvoS>%G_-m?&b%F1e%t*m14@-z|ipZV!e|Cp-%rnKOByc`^3VzM5C zcMFA2_=5VvL|;GYfqg`33bUT(qg!N8JIhRQLFoW9(k8#V{Vv|~_aEZQE8j|W?2qWp zzrY>vq4W;oJa8kK{(naO(z{T~j&*JnLL=at4L~-V<4|iC7XWTonDqKtxJf^z&m^18 z0s_-8$k-Vo!TE&DAe*1x&Az`r!37sCr*T;$X~#uo@&Hs+RNy!cJv}{4n>LMxg>z`D ze;!h|Vr}k3n|mE~VU517JBZ6>BDEhM^PV%=9FFbc_nTCeT*HC(uVW}X=<7?MXl0Xf z?QoqONQt5-n5J1+-=4Z>eKawKzc2nhoIsNyl5JZglPT=HgYHvZJZaNAmYN2WAWlJD zYe#acVX9x%3`L}Rw-YR%kE#dJ!_&AcxPw*6c2-ea$f)_^$etr)k>qelW0Aw9K?Z4( 
zlJhRO5LJDio}P7-FZ~W$#Y#*gNBYn`q<;JiYW;MApMKqOuTfqWWySKjxC1Ft-`+z1 zElZJCs04z3TARBu3x*Bbcr|w1~5z5NS z*t>TR)22;h?!5WjchxM+*Qc@f{0RHNuMt`hU)fw{o&7a5wPZwl-2NolJ2zpRJDY9E z7@MEp%gpJu%%0VVFv{udd4}!Vo6*ej2~TbkF9Eq^c)9!IpX2C3U3?v*VvXZ<^f`_` zfARf<#)uGfbaaqOWebJ^aRB)9iH@7B#F4nt7!$qV0P6V~oH{DGg%rG59J1Kt3n_R_R7xRic!y1moC>1@1tK#6R7J|Ly0n@;h&5$>P~aDN%6fIk=PN zjuM(%2GPwKOh!^1ueh$R>*%_Us;Ztp-muqbB!J0Dh^d5SS#)=IW91#pfCg|2(0n{; z1Cu09LF()U{$TjE{fiJq+lO6Qx-u5#nZG?q)BG^e&^!`0Jhpx_ zQYyUhb?2eauEIXl2hNGt0M1|?l+S#Ok`#9RtTKrcp1jWMH ze-jn0&+@>bD~P2u0%eo)K@-FBF%E}e7~UkIzOdf$Cj%jbw{hGbD|m5;R^WHz>6l61 zWg$+dLkRIEAw(pbh@qNM%tGczMbXd$RZ!BzVChT_meulX*yKSo$%CSU4YGqyIS4oe z(ea^8D4wUNhS|6zu^1prj64=Del$ujDZwO!jvuvf_9N#n!Qhd^ex_YElSo)nS-za< z)2fI>LR40iU^JGaEv-jiHVu1MJ0QG`!XZe~*?p)=5Ur*h|F!4fo;#Df?)wW%mn>!0 ztXcf|&wu8E3of9hrnb;#F=EdPAa8ZkGk7_peg2$nXmefu_&$s`LGE+P;J zaQ9tzao%}rPgs#nXe!#gTCB(Rl7Hwfl&bGwAN(I=-v*p{pXFC~9$;^aj_xZN&-gfc zq>)RukxLvRR1zQ(2@gq(Kp^0`B)x=SSr!Km93VcJ#tdo*A^sb)Fe$}H5FO(vq8yC?Iz6!-4$rD%-jWc8Iulhj zsHlh%2n0~4SD-I$z>TFDe8*355BHNkYdSl39ipnL6h#qq_5TR~pT7}TQ#g3w01rL* z5bt=$bqov)@YrLI@z%G#4bwD7Z9^)vo5S5d$8}W{8ODF;KHMEY!>CwDplUI9-FXM+ zop&A$4GnDAuz_?s#k_g*Mw~lQ3SO?MXfm>efgJZ z&P?YgKfRT_(uAT+{Ly>WmpjPD4-t&|2p>h9v5NbSrKU~v+7mg?iM}s{psTBk)|L(w zMW7pM3to1o?%{DaW8q}ua9!Y6LJ?Q{!^ZSPe>bVV9YktIQ6~zURYfAHfl3HeA1P8+C1F$fG1WkkZ;6E=LURm0E! 
zxRLyu--#OZBc;T$Jdft0nbqwW#NOF~anXEKU1iVS76u05oWFKCK64uB+(Dwj)nqdn z?!WJTmMmF9Wo0Ed-gqPP=FJSr=pML}n(}Lq_5xz@6v6Y}PHz1Nu%7)KOD_5| zl~vU|_`n0a?aC{;>Z+^xumAcHt5>h4s>)j*xv||?Pk#z`a2Mbpll|CRufhNJFQTuW zi8}pC;7R~2+u?~jeoJSsN}$|7jvg9gKq`768}P{E#~R4lY?e&MtJth43aYB2s@kx1 z(8xjUIj~El^c-P5hgcWK^_*j+>y0B_HwXm$qh8l>98#$?uIm6|#9B*L6%<9qwkhu9oRTCH4c}L_@gZQH}QO6Z(>*Z-vRUdkw5=xrr zh}QAAnPt6wfPSkN9|{hTE-dmTULDdhe3TZ7`A#-nRRojHo(}dms4QQ#s_?+%&;l1g zrt)O|?+Jw8q~zLjS+ryp2tg{9$8sOT(91Ejxop|8nFk(xfDe7>Lu9jAZoc{VTzB2| zl$Mr`dGDrCPsl%?zP>>Y9cp9RjA;nn#M$#x%=$|xo4$tIZo8Fp&RI=kV8&_sI-27GiW+Q=fzKb^R0|>E<t@x88m2V_DJ#|R`y7PGV&{`& zvvJa?7^zg8bSh3d9VeHKV_6AYH;t-h@%wBdVW=$AsI4|>toKt}>n9QKBNn#^1V)ZC zDWRvQou(N+X3vUJU1?Af6$Jbanwm$-4EcP5=0k_*?HWYWG&DomhbL|N$0KdvQ72 z8{2lVESqf3BAdyP$>z!BES!Rr*EJQ>)G-VV)6mfk4PDdFv?JCzRE4LX+QH|){6}23 zY=pmQE|;UCOy?W__D)L6BQQ*A*|v-2x@_FAhrj>ik8x}lGom`c&w>5pl{PR1#3@+V zT7VxLKJ|xYK+{uw+X3@F12R`?Y3=Vi<0=8{o+YYL#;5*+WQqxA&W1psc-_P*{A{d5`=_|fW zHWTK5fAe3=o;{n(FMkzJJoYF%Z-0O<|M+_&)QFtbMJBtClJMGN+EH0q!t&*Far+V^ zzqy6{BNr0*$P!dl;mvP;3*Y|sx47`a3z;)#F7J59b=-L4@2RM{gh+4!0bi6YTlaCO zrIT}3FQ#GDGzOeB-Oq0&zBmK-DK*o->3z?0>%J#xX=$aWr<-IVfotm1OWeza;nI4gw#&4F9!j5UN5X5+RjJ z^6K{ygZ)4b>FZS)?wFChe}RFZjf$~gDj1!LsA zV_1IR;ja@n#IkJua_=T2{voxxs2(=W2y@w`qa=w}75;YLvwZ)DcaYC(n0`G2d>(ja zBGLw?MpY|Hok8GRny#!0gw2vvteI5b4#L$dPEI^XiKh9m>eup{^u3&uiqk;!q!Yj= z$dkvV5xbkW$N=k7Y2fsf+Bpf0X3K9y1QFWc3-hMW2+QiRq{43qv zza|oi^0lvhodXB<^Rxf{1#j@pW1*Tyh74v`mGZ=sTiLXEFH4rp03^1PKvR!37&wDz z03zSImcaWiL=+q-6-D7yuX;6`Hf`d82Oi*xE3V+$Yp>y(-`vj~w{M^<`fu#sVPhB? z3+6AN>9X?)1OlWINjf^a=;-RAqa%$OS&ArDW}Ec00HGQPMZ=ZeVqFw+DvTv@ijJUD z4OLN4Re^m}o-8RPn(jH*ju?1~ptGZo|N8lz#0D*Vfe^r*z}=I%{*(}B7#;?G=L>2d z&1Ed&ozLP6H=+kBPV#|CbR$AXX+7IgeKb!}AQ%ajnUo|?*P zifW=LCelq6oLGyG=-6`~@erSSxKiS}auhQ&xT9=ZJC4i${`PNd+;{-p2%@N>4fr;Y zo~X2eDI`uo>R7<{gd#2pM$Lt>-WaitrzoHCnv+)90HhvbkU>r+XJ)3b|5J*IIZRV^ zk*tw*e5gCaS5g*<3$E~D^34E)!BX0-Jlm54+|_k}vY*|@C6U|t)Vr?apa1Ef`S~w? 
z4#505^DrA#%-2=mw0#zP=T4NW^QmzFSV_IT}L1+`02uvt|wd z@gM(AAP``|f(3lz>)+)0tvmSIFK^|^L51$B`M7m&gvHff0+L7nL|HaOq+;n5J!Ofa z>Rxg}nZ&m36Br02s(NZ%YHZuavR!oJDB>sz*_<_Ul28?%c=UO0yy-!tYoME&1N@j_ z)dwbuDV+`q=IH?-p=!!&r9{ufdeMvsMyTfGPA&viri*h2Vl1U%Qgz}y4}%;fm;^X> zvrYi>WC_9ceQe6Ml5Lm`n(7Tgf6yyRTRoFZ(^>44r961YAE}ZFKJctl<9shQl$Ap~@H@S@*_OhzQgJo{LTU zp>I&J|5>zL-gDZF%*I&rEtHD$Mr~IzndCRW`CroMH2?AM|G{7X_5feK z^&}&Bi4_lSMZf=`l>kAkCv0H>x6fx?1nw)dH^AmZs6a5zR8jXj+1nKvM4kr1AI+U#ExLp z#I<4!wynptz4(;LcbG;_Oib3~ap)$^AUXVKL^!^$mjrxhOd|xL2yA>99{vNg?=Qrt z7rLxa<~(-k`}z0B+xX3mzvmtAcn5yJpPO&`JyP|flr2T6S~El%DIMB+eoaoRLR)(? zzSrG{apA9#1O23a_(R;z!DHH!%jNjZZ+=5Am*W#3{{+9gu zg~NN{?*Dl6J5LIysN5F-dw_q_O*I*am}FuHNOWyv@()rX%qY8+6gh0P z5eW{9Ne+*ECJ>~s=p;=F8x=uG!9RNJ&hWC09%O*^q6>G<3V8K(V3^*ZeBgJ50!*?{ z{Y`=|Nc`d(_?LS&vhI;b`MbaS09&?fVe{rq$DBWBb>g^5>MK6X_C0Bmc@wqabzppp z)c^C7e{9b&$J}wp9dvbd@Uf45oZsF22mbZJ-Netm3L>Ro)@7zke(7cn(GJ0sIa4 z3E-ZLw1HEksuc_c-~|M&_J#C+u=0+GcRq*iuOv`5XW}ZGl$ic1)>l;V^`7TB$1SA< z0~G}ampm>m5*39Adcnx7P)rXJ#=tkmlCJ3Gc9*V zyWrm6cvTSQtbpkYAY2O33Od$Y$CrQgTW*^_kE^b_nm_#E56qvx0KeZqbREY{qN$~X zLM23_N&fWbM|k5K&nH+p8*B}yJ3gXaJ9cbm-MUBk!WX{CW9uL1U+>vKV&$74)(cy9 zLRYJo;&=YpFn6WbuC7BkDoEGy%+IF;h4S^|d2HaN+8xpqhV9b~OU*PYhW}sSSxKZM zo64|p^Fe&RW7ma>qTsq3k371COGjl$&aURcx=JSJJ{F49t$`^R#k%xKu>8H8);){o_Y3kJlHyel9vl{5{ zj}wch@NH^FQ5DSjbwl5^EQ_0OzKPeo<~8*8bn}&O{E$Ak3jNMsV%u5d{IlSkS9=*b zeL=5{t^@ucwxJ{4{HY_(h-rRg(u?8Hb%PBX_p@TdZfdJa3HS{(O+^&5%}Yi&a>_yy z4cC>pj*D$OPp z6?W55S+%T+@~BBJpQWSy0Ka|mL0;e7%GJz6D`=)JE^%_~pqF)KFPke&+9Gpsd@~5> zA*7wf)}$wGhUNLfbv?`4(hE=y@1gQQA4W@-?4Ae6DtD7J1UK287%t@U8GiS>-w}yK zh?YbN_ygYVx^@wXL>eJr;etkN+os!1k+`^y-bj|gCmyG}tB0*yw{zEB5AfWEgS58A z=;{^J`vi&%(;3mopLIECdLfE$Sn@|RfE^0qrn7}&P^Umu8eZ|YnCn{}yAfGZQo`UOyRO7>T<_(|g141N9H;O<+m_gl#Bn52DyXV~-yfi=s(w_% zgOrd=rs(g_lFu3#e$55`8@Q=BCZ__YlsHH3hG!6b$?&N^2}aFua_}Irw#O-Lym)f^ zJHvdW#F1-ISK*^9YxL4|aRW)UDje+CI98Q&Ek*ZxHQdH=d94K#wWjJ<4?yMWM2?+6xPC94yBrml~$B zF)UW5kj(}GLIWLvA%6Nt4Kfs^b4-G@Gk^(x8m#WMJt-XDUepFOb9;=2h@*1Wmz+@ebF?66<&h%?J@JU&P6%6jDN6Wq`7X 
zIqFQPDk{?&=P_;CJQN{FXKi*I7@)bmAKQ^5RCXaHnQS*s<7&@xR#bz`pr;xk5Zj+3 zBts~wIl@DDWG>^v`Ggo+YaB669J@vs3HP|Ej{|{vw7pxgmYnN-X6265@RSlcV*zP# z7ZE2vB_12c^mn08j^sN#waHgb1kWov$H4;!=<6Cl(^WJ>>Bft(*?KCjc`D7(BT!&` z4EQIZh;^Y7ADU(}(7KLH?7-y9@-b4{j0GuOr0dezo8pP*JK1+Qj^oOsE-T1qdP$gh zn6dDPN~r4IdPgV_>%n~fY5d`7<1odJ*_vUmi5@C30+j@N2I25N@3VC52*F(!s%Ma% zF@wzDL8d~QAKM0Jm~z71!I65Ds@scX^&Vo4SOEr37NKSo95lwk?r(I4MM=90@HGA!>{FzS&3}UDrO+u_r(xksz6jBc+Y1NECHsCI~4h z1jUUEpFU!>7DMW+rs)`lS%@|*WJq#dT3QZq_)rH@fSmFj z)`Wd9hVC!-l9U(jlVkoGI&I;P;h7uch+NAck#63L*c~PTj&{2#F4Tkm6xUvPHZ!JI z5{(4$nR?;73|ZA|+rf6+A?Z{6ET6Z@Wb-7G83qPZboaz*YwM@ErGuDdQdU;M;lr)8 zG#|!sT>K%!Mesj(UeDI4BF$)h_ky569)Pz1Kg(yG>O_AA&8(t)=IaVRZ6`jDMAB3j zX4Qi7QAN0vl8)XC+xHJJkjx)9>y3ASLtoDZ`e)VRE?EoFGH>k=EQN#H2;6%s)!Gb< zQ0;L_NsM*wqs+C#eh&ZS=rz)41GjiC$4b%Lw}Hf})yQS%du8WUuaK_T{T#jr|4db2 zzGs#mDuLl;`j@U#P`L^(*G9S)(#?CNGu%S?47c#VVQV=n(}(F#6E5-N_xn*46~Et0 z7$1p5&@>Gp1f887yyNP{eDEW01l!@5TP|YE$1*a3IFOI49F^sg1l|ldO4%}z|2Ehoz&h!7< zJM$R3s{4+A&N+8^Z|2S78QbIC*al1r#$~Tq8d7K^N|lyI6p0qKO`AW0gi5e&=_7i%5^6#`;lOFk~GA5k}XpqXODGvgpPCmo3sr3nf4aKnZkNL2IGB?&q^Ite{N z!jR^dz&H35NCT&dI8Y>y2L2mJI7#(Dok%w3GsF0qAs})PoQ^y~b22l9O&_zbD4!oF zR*WeBzqBr*-|8A9Li)!55)q|hs!UC*is_P+QYIgOWn8MUv{Q`J#eYv1Mqw?<&_z zWiDp7eZ{^^p2>fz&r|OMN#oEU$B(zoYGU-1AO)Fpp6~w0GyL#Jzao>CxHT?Pir)ZV z1KQz}v7k+c(@LE9Lf5Wc9RLqwbd_%dOMKs_r>BRND_7AHl{sI?1X>6<)&7~we&uA- zWh!RLJY9LLa6SUqq-11doCh9!ipPKY68W5kn@Av}cmcQ*XoF9WG_U}O6JKydo*x4R z-~p{!HaIZM&Ykaa@z$-ZS-s|TH}j0aWD-Y2wJ=NWE%CaXw3w3+f*>$ifa0Vblbt6F z!a3PcCO}ELy9W8E`=8(!&%R95Mmo}7oW1EiLoGv6JL0wwmk-9q!bV8kToMi zW4!;~`{Z+ZHf-4N$wU+a6UHf1U?EAp$!f+-TuKC(jtJGqn++_>qEPV2W;0d&Q|hB2 z3^`@yD9PbNC%NyNkMip4dyq1LvfL2ku=#gDH+=e}fdxdIxC0`C!PCH~vc&g5oj*R3 zVb=${$!0TLc;OZjiP(osMAjfd<(Oge?@Hs4=)2-q;t4@;D%P|p_U7%k_Hp009%IkG zHk7JC2*(2+0{)SxjQZ&y4JaXI z{mT(244=%4I7YScp!~b)^@R+}vLcIHG0v>~BJsRnZU+>Ht@qaE9^X>MM+sH%XSiSWDvnM{VZV{J&KSiam;qt0ft ztY4ery3b#1$RTBE!c0?itI6dHV1V zvaO42w&U>O-V;3is`-j%FAh&M-2PVRSr8CgYLI706_BbaxNX(bn-fq%igG#3@F{K`rXe#y_QY%x1l 
z_^icoDREeFOUnwHo114v9^;KX%)WgekZ=V`fn`Z-OX4^ZHwxQ!UCS(vW$Mt_7Pf8N zZImim<$^$yFL;cN=Qwb%lT5CjEn6;`v@MxTny#)cw!g9+%TiplWlND{Q%N_iHN(S0 zw70i2Ha3RV#i7n&Ok$0v01p-(X<&h?YSk!H-Hu?~EOt6c>uzPsHRGdM4(!`Y)6yij z-tu{_yz+Aden2{%Cl?hj$`^b*FOTO3QId$#8&)Za<0u*$YU$}2%k8vk>g@!*{YR;^iP+7Vx`=dmEVQ3zju&RJjT;M3w;0G*OQpcLr^^A^= z&iK5Wg1dQ^FKdXR0(9vd#U(7c)=#zgLFE#LTzUEVWHLh)H*N&Qaav5KY&OgB<82%`@Dcs}eFQ<# zudNNZ8@L^K3nsh}7c>@~s#cA-qa*e09pDZGw@D%Y7LphGzR!*w2l?QGquhAIR{r9S zFLK3Yo3UMc>fkCNXj+mYpC3GZ7p66Jbv4wbY;w6A$z)ki?jT1H0-Jo&Soy!`S$Zo27G?zsK;xa5*^u^rpIp{t&YR4Re*do!L_M1EDiKfiBz z0LQVYuXoAk^RwFCbzS;<3uJQz>gw!yOGc<SbT(uv8YwGYE+5(qnJt4KbA`TG?{Y8YU)x94g~z<;Wzoy+aBbuuRp}= zuk9t5E0_XOWuDirYtMQvM+iwi@6B(tBOsNcE|s8A$TO35!Ki8unQX}TSa$vzp`$U* z6yvI*?LGhi23tu)K~(-Y@kTC}BMd|G`8@6I?Tn9)M|~pz_5xoAz6|_=MNS$xgCtHl z%G9?IqrU9|zF^t%&Qzm&L&CFbMn*jz{mGkbd*%bKxpp%*-+U>*d);qw&idsj*T%Ax z$;CFqOIrwmQWlx)?4-h+AOy8F4t|d}ox!RI%d!aq$>7i!E7vSDMuK@jDU_omaio+4 zK|p_h9|sQ}q_y=hxoo!h@`wo2it#Ud95`Nme-<;&NaDmOQ{VEK|J;YX@cf6YS=+?5*KOwJKfH|L;gQIJQ4oe=+VO>D*^JHSTRw(>M8YNr z{8?>o@&`Ed^bT|3JkD*$s#-kY&*$@WcX!j;dYBW(k25xAtYp$Eh8DD9a!EXanb0iW zk23{lG;v^p@gR-Ktn(7^1!c>>Of|YU)YLhR`K(Vz=P-{y@g`3{^)8K#37T70P@7!B zs?{s0sjZ1J>`zqRY&)`MeFDC6j$;vqvy)~*2;4+1on1ro=My3Ywr!!qkb!{#Iyz2r zA!fv%KF+@4I? 
ze~_rL)_R4Ie8!`{KSOVCE3F5PvwX!;Hl4SDbI;#M%ZlYBlBPOvCa^i!g-rFvhlnPEL*k|*L6uIlZ=cEp@VS7d7rsL3NdR(Ppvhn+7#VAeFUCA`)~^x zB~(L`OXoOovXA$79pd#j_Of$lD<@72;uRD^8nQS^3n@h>#&Y=-Mg<UAqQ|AKR=Z>S?{ zr}6y&JL#6Z2j)Qn&GiU1H8q@Q%QBwM(a?~Z*oHXspefLUm-FcBAK~zklf1v{FgxEn z#J+v)ba#!C&w*4Hj+>ySP9c>TL$DjobHJ~F13+;Ab~X?u##u|8809xRBGCZwOH3@t z1|hiKa^+2yBd{Wpu3NIf}$a%CLJWA`qVgX z#uTpW7;lBxt*0`h_>9n@p0yOyTI0G2GFi>g&=?I(^%UKXwI=ie#?u*kdxtsN*2TX4 z$Jw**7zYoXq@!bqu@R5Jmnh4^c50|=lt?Ag2o9im8F&HBE+W^M$@=hY!C6n7a#Ts7 z05iaCMe`W2Q3$TFY6*4znDUC_Eyh?MaI8%rwO8PEDEH#iyVA%>~Nu&~S z+)qIW1~GMt_F*b9@5aox^kXD#5ucG|a(qUdkAW$Ou82ictYj4J}x#}@{cpYaVGFh#Cg|DwEGC`$J!#Kf?8 z7*l!%^Y9M?{b+hHgg6;3T|jU2`;@K?pAlv5_M(Ix;vqGdV3VG&(Ra3vaZh0000PbVXQnQ*UN;cVTj606}DL gVr3vnZDD6+Qe|Oed2z{QJOBUy07*qoM6N<$f|# +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/github.com/PaesslerAG/jsonpath/README.md b/vendor/github.com/PaesslerAG/jsonpath/README.md new file mode 100644 index 000000000..0919a775c --- /dev/null +++ b/vendor/github.com/PaesslerAG/jsonpath/README.md @@ -0,0 +1,11 @@ +JSONPath +==== + +[![Build Status](https://api.travis-ci.org/PaesslerAG/jsonpath.svg?branch=master)](https://travis-ci.org/PaesslerAG/jsonpath) +[![Godoc](https://godoc.org/github.com/PaesslerAG/jsonpath?status.png)](https://godoc.org/github.com/PaesslerAG/jsonpath) + +JSONPath is a complete implementation of [http://goessner.net/articles/JsonPath/](http://goessner.net/articles/JsonPath/). +JSONPath can be combined with a script language. In many web samples it's combined with javascript. This framework comes without a script language but can be easily extended with one. See [example](https://godoc.org/github.com/PaesslerAG/jsonpath#example-package--Gval). + +It is based on [Gval](https://github.com/PaesslerAG/gval) and can be combined with the modular expression languages based on gval. +So for script features like multiply, length, regex or many more take a look at the documentation in the [GoDoc](https://godoc.org/github.com/PaesslerAG/jsonpath). 
\ No newline at end of file diff --git a/vendor/github.com/PaesslerAG/jsonpath/jsonpath.go b/vendor/github.com/PaesslerAG/jsonpath/jsonpath.go new file mode 100644 index 000000000..cada95c9a --- /dev/null +++ b/vendor/github.com/PaesslerAG/jsonpath/jsonpath.go @@ -0,0 +1,54 @@ +// Package jsonpath is an implementation of http://goessner.net/articles/JsonPath/ +// If a JSONPath contains one of +// [key1, key2 ...], .., *, [min:max], [min:max:step], (? expression) +// all matchs are listed in an []interface{} +// +// The package comes with an extension of JSONPath to access the wildcard values of a match. +// If the JSONPath is used inside of a JSON object, you can use placeholder '#' or '#i' with natural number i +// to access all wildcards values or the ith wildcard +// +// This package can be extended with gval modules for script features like multiply, length, regex or many more. +// So take a look at github.com/PaesslerAG/gval. +package jsonpath + +import ( + "context" + + "github.com/PaesslerAG/gval" +) + +// New returns an selector for given JSONPath +func New(path string) (gval.Evaluable, error) { + return lang.NewEvaluable(path) +} + +//Get executes given JSONPath on given value +func Get(path string, value interface{}) (interface{}, error) { + eval, err := lang.NewEvaluable(path) + if err != nil { + return nil, err + } + return eval(context.Background(), value) +} + +var lang = gval.NewLanguage( + gval.Base(), + gval.PrefixExtension('$', parseRootPath), + gval.PrefixExtension('@', parseCurrentPath), +) + +//Language is the JSONPath Language +func Language() gval.Language { + return lang +} + +var placeholderExtension = gval.NewLanguage( + lang, + gval.PrefixExtension('{', parseJSONObject), + gval.PrefixExtension('#', parsePlaceholder), +) + +//PlaceholderExtension is the JSONPath Language with placeholder +func PlaceholderExtension() gval.Language { + return placeholderExtension +} diff --git a/vendor/github.com/PaesslerAG/jsonpath/parse.go 
b/vendor/github.com/PaesslerAG/jsonpath/parse.go new file mode 100644 index 000000000..18a4cb37c --- /dev/null +++ b/vendor/github.com/PaesslerAG/jsonpath/parse.go @@ -0,0 +1,204 @@ +package jsonpath + +import ( + "context" + "fmt" + "math" + "text/scanner" + + "github.com/PaesslerAG/gval" +) + +type parser struct { + *gval.Parser + path path +} + +func parseRootPath(ctx context.Context, gParser *gval.Parser) (r gval.Evaluable, err error) { + p := newParser(gParser) + return p.parse(ctx) +} + +func parseCurrentPath(ctx context.Context, gParser *gval.Parser) (r gval.Evaluable, err error) { + p := newParser(gParser) + p.appendPlainSelector(currentElementSelector()) + return p.parse(ctx) +} + +func newParser(p *gval.Parser) *parser { + return &parser{Parser: p, path: plainPath{}} +} + +func (p *parser) parse(c context.Context) (r gval.Evaluable, err error) { + err = p.parsePath(c) + + if err != nil { + return nil, err + } + return p.path.evaluate, nil +} + +func (p *parser) parsePath(c context.Context) error { + switch p.Scan() { + case '.': + return p.parseSelect(c) + case '[': + keys, seperator, err := p.parseBracket(c) + + if err != nil { + return err + } + + switch seperator { + case ':': + if len(keys) > 3 { + return fmt.Errorf("range query has at least the parameter [min:max:step]") + } + keys = append(keys, []gval.Evaluable{ + p.Const(0), p.Const(float64(math.MaxInt32)), p.Const(1)}[len(keys):]...) 
+ p.appendAmbiguousSelector(rangeSelector(keys[0], keys[1], keys[2])) + case '?': + if len(keys) != 1 { + return fmt.Errorf("filter needs exactly one key") + } + p.appendAmbiguousSelector(filterSelector(keys[0])) + default: + if len(keys) == 1 { + p.appendPlainSelector(directSelector(keys[0])) + } else { + p.appendAmbiguousSelector(multiSelector(keys)) + } + } + return p.parsePath(c) + case '(': + return p.parseScript(c) + default: + p.Camouflage("jsonpath", '.', '[', '(') + return nil + } +} + +func (p *parser) parseSelect(c context.Context) error { + scan := p.Scan() + switch scan { + case scanner.Ident: + p.appendPlainSelector(directSelector(p.Const(p.TokenText()))) + return p.parsePath(c) + case '.': + p.appendAmbiguousSelector(mapperSelector()) + return p.parseMapper(c) + case '*': + p.appendAmbiguousSelector(starSelector()) + return p.parsePath(c) + default: + return p.Expected("JSON select", scanner.Ident, '.', '*') + } +} + +func (p *parser) parseBracket(c context.Context) (keys []gval.Evaluable, seperator rune, err error) { + for { + scan := p.Scan() + skipScan := false + switch scan { + case '?': + skipScan = true + case ':': + i := float64(0) + if len(keys) == 1 { + i = math.MaxInt32 + } + keys = append(keys, p.Const(i)) + skipScan = true + case '*': + if p.Scan() != ']' { + return nil, 0, p.Expected("JSON bracket star", ']') + } + return []gval.Evaluable{}, 0, nil + case ']': + if seperator == ':' { + skipScan = true + break + } + fallthrough + default: + p.Camouflage("jsonpath brackets") + key, err := p.ParseExpression(c) + if err != nil { + return nil, 0, err + } + keys = append(keys, key) + } + if !skipScan { + scan = p.Scan() + } + if seperator == 0 { + seperator = scan + } + switch scan { + case ':', ',': + case ']': + return + case '?': + if len(keys) != 0 { + return nil, 0, p.Expected("JSON filter", ']') + } + default: + return nil, 0, p.Expected("JSON bracket separator", ':', ',') + } + if seperator != scan { + return nil, 0, fmt.Errorf("mixed 
%v and %v in JSON bracket", seperator, scan) + } + } +} + +func (p *parser) parseMapper(c context.Context) error { + scan := p.Scan() + switch scan { + case scanner.Ident: + p.appendPlainSelector(directSelector(p.Const(p.TokenText()))) + case '[': + keys, seperator, err := p.parseBracket(c) + + if err != nil { + return err + } + switch seperator { + case ':': + return fmt.Errorf("mapper can not be combined with range query") + case '?': + if len(keys) != 1 { + return fmt.Errorf("filter needs exactly one key") + } + p.appendAmbiguousSelector(filterSelector(keys[0])) + default: + p.appendAmbiguousSelector(multiSelector(keys)) + } + case '*': + p.appendAmbiguousSelector(starSelector()) + case '(': + return p.parseScript(c) + default: + return p.Expected("JSON mapper", '[', scanner.Ident, '*') + } + return p.parsePath(c) +} + +func (p *parser) parseScript(c context.Context) error { + script, err := p.ParseExpression(c) + if err != nil { + return err + } + if p.Scan() != ')' { + return p.Expected("jsnopath script", ')') + } + p.appendPlainSelector(newScript(script)) + return p.parsePath(c) +} + +func (p *parser) appendPlainSelector(next plainSelector) { + p.path = p.path.withPlainSelector(next) +} + +func (p *parser) appendAmbiguousSelector(next ambiguousSelector) { + p.path = p.path.withAmbiguousSelector(next) +} diff --git a/vendor/github.com/PaesslerAG/jsonpath/path.go b/vendor/github.com/PaesslerAG/jsonpath/path.go new file mode 100644 index 000000000..b8e784d84 --- /dev/null +++ b/vendor/github.com/PaesslerAG/jsonpath/path.go @@ -0,0 +1,103 @@ +package jsonpath + +import "context" + +type path interface { + evaluate(c context.Context, parameter interface{}) (interface{}, error) + visitMatchs(c context.Context, r interface{}, visit pathMatcher) + withPlainSelector(plainSelector) path + withAmbiguousSelector(ambiguousSelector) path +} + +type plainPath []plainSelector + +type ambiguousMatcher func(key, v interface{}) + +func (p plainPath) evaluate(ctx 
context.Context, root interface{}) (interface{}, error) { + return p.evaluatePath(ctx, root, root) +} + +func (p plainPath) evaluatePath(ctx context.Context, root, value interface{}) (interface{}, error) { + var err error + for _, sel := range p { + value, err = sel(ctx, root, value) + if err != nil { + return nil, err + } + } + return value, nil +} + +func (p plainPath) matcher(ctx context.Context, r interface{}, match ambiguousMatcher) ambiguousMatcher { + if len(p) == 0 { + return match + } + return func(k, v interface{}) { + res, err := p.evaluatePath(ctx, r, v) + if err == nil { + match(k, res) + } + } +} + +func (p plainPath) visitMatchs(ctx context.Context, r interface{}, visit pathMatcher) { + res, err := p.evaluatePath(ctx, r, r) + if err == nil { + visit(nil, res) + } +} + +func (p plainPath) withPlainSelector(selector plainSelector) path { + return append(p, selector) +} +func (p plainPath) withAmbiguousSelector(selector ambiguousSelector) path { + return &ambiguousPath{ + parent: p, + branch: selector, + } +} + +type ambiguousPath struct { + parent path + branch ambiguousSelector + ending plainPath +} + +func (p *ambiguousPath) evaluate(ctx context.Context, parameter interface{}) (interface{}, error) { + matchs := []interface{}{} + p.visitMatchs(ctx, parameter, func(keys []interface{}, match interface{}) { + matchs = append(matchs, match) + }) + return matchs, nil +} + +func (p *ambiguousPath) visitMatchs(ctx context.Context, r interface{}, visit pathMatcher) { + p.parent.visitMatchs(ctx, r, func(keys []interface{}, v interface{}) { + p.branch(ctx, r, v, p.ending.matcher(ctx, r, visit.matcher(keys))) + }) +} + +func (p *ambiguousPath) branchMatcher(ctx context.Context, r interface{}, m ambiguousMatcher) ambiguousMatcher { + return func(k, v interface{}) { + p.branch(ctx, r, v, m) + } +} + +func (p *ambiguousPath) withPlainSelector(selector plainSelector) path { + p.ending = append(p.ending, selector) + return p +} +func (p *ambiguousPath) 
withAmbiguousSelector(selector ambiguousSelector) path { + return &ambiguousPath{ + parent: p, + branch: selector, + } +} + +type pathMatcher func(keys []interface{}, match interface{}) + +func (m pathMatcher) matcher(keys []interface{}) ambiguousMatcher { + return func(key, match interface{}) { + m(append(keys, key), match) + } +} diff --git a/vendor/github.com/PaesslerAG/jsonpath/placeholder.go b/vendor/github.com/PaesslerAG/jsonpath/placeholder.go new file mode 100644 index 000000000..d1cd063f4 --- /dev/null +++ b/vendor/github.com/PaesslerAG/jsonpath/placeholder.go @@ -0,0 +1,181 @@ +package jsonpath + +import ( + "bytes" + "context" + "fmt" + "strconv" + "text/scanner" + + "github.com/PaesslerAG/gval" +) + +type keyValueVisitor func(key string, value interface{}) + +type jsonObject interface { + visitElements(c context.Context, v interface{}, visit keyValueVisitor) error +} + +type jsonObjectSlice []jsonObject + +type keyValuePair struct { + key gval.Evaluable + value gval.Evaluable +} + +type keyValueMatcher struct { + key gval.Evaluable + matcher func(c context.Context, r interface{}, visit pathMatcher) +} + +func parseJSONObject(ctx context.Context, p *gval.Parser) (gval.Evaluable, error) { + evals := jsonObjectSlice{} + for { + switch p.Scan() { + default: + hasWildcard := false + + p.Camouflage("object", ',', '}') + key, err := p.ParseExpression(context.WithValue(ctx, hasPlaceholdersContextKey{}, &hasWildcard)) + if err != nil { + return nil, err + } + if p.Scan() != ':' { + if err != nil { + return nil, p.Expected("object", ':') + } + } + e, err := parseJSONObjectElement(ctx, p, hasWildcard, key) + if err != nil { + return nil, err + } + evals.addElements(e) + case ',': + case '}': + return evals.evaluable, nil + } + } +} + +func parseJSONObjectElement(ctx context.Context, gParser *gval.Parser, hasWildcard bool, key gval.Evaluable) (jsonObject, error) { + if hasWildcard { + p := newParser(gParser) + switch gParser.Scan() { + case '$': + case '@': + 
p.appendPlainSelector(currentElementSelector()) + default: + return nil, p.Expected("JSONPath key and value") + } + + if err := p.parsePath(ctx); err != nil { + return nil, err + } + return keyValueMatcher{key, p.path.visitMatchs}, nil + } + value, err := gParser.ParseExpression(ctx) + if err != nil { + return nil, err + } + return keyValuePair{key, value}, nil +} + +func (kv keyValuePair) visitElements(c context.Context, v interface{}, visit keyValueVisitor) error { + value, err := kv.value(c, v) + if err != nil { + return err + } + key, err := kv.key.EvalString(c, v) + if err != nil { + return err + } + visit(key, value) + return nil +} + +func (kv keyValueMatcher) visitElements(c context.Context, v interface{}, visit keyValueVisitor) (err error) { + kv.matcher(c, v, func(keys []interface{}, match interface{}) { + key, er := kv.key.EvalString(context.WithValue(c, placeholdersContextKey{}, keys), v) + if er != nil { + err = er + } + visit(key, match) + }) + return +} + +func (j *jsonObjectSlice) addElements(e jsonObject) { + *j = append(*j, e) +} + +func (j jsonObjectSlice) evaluable(c context.Context, v interface{}) (interface{}, error) { + vs := map[string]interface{}{} + + err := j.visitElements(c, v, func(key string, value interface{}) { vs[key] = value }) + if err != nil { + return nil, err + } + return vs, nil +} + +func (j jsonObjectSlice) visitElements(ctx context.Context, v interface{}, visit keyValueVisitor) (err error) { + for _, e := range j { + if err := e.visitElements(ctx, v, visit); err != nil { + return err + } + } + return nil +} + +func parsePlaceholder(c context.Context, p *gval.Parser) (gval.Evaluable, error) { + hasWildcard := c.Value(hasPlaceholdersContextKey{}) + if hasWildcard == nil { + return nil, fmt.Errorf("JSONPath placeholder must only be used in an JSON object key") + } + *(hasWildcard.(*bool)) = true + switch p.Scan() { + case scanner.Int: + id, err := strconv.Atoi(p.TokenText()) + if err != nil { + return nil, err + } + return 
placeholder(id).evaluable, nil + default: + p.Camouflage("JSONPath placeholder") + return allPlaceholders.evaluable, nil + } +} + +type hasPlaceholdersContextKey struct{} + +type placeholdersContextKey struct{} + +type placeholder int + +const allPlaceholders = placeholder(-1) + +func (key placeholder) evaluable(c context.Context, v interface{}) (interface{}, error) { + wildcards, ok := c.Value(placeholdersContextKey{}).([]interface{}) + if !ok || len(wildcards) <= int(key) { + return nil, fmt.Errorf("JSONPath placeholder #%d is not available", key) + } + if key == allPlaceholders { + sb := bytes.Buffer{} + sb.WriteString("$") + quoteWildcardValues(&sb, wildcards) + return sb.String(), nil + } + return wildcards[int(key)], nil +} + +func quoteWildcardValues(sb *bytes.Buffer, wildcards []interface{}) { + for _, w := range wildcards { + if wildcards, ok := w.([]interface{}); ok { + quoteWildcardValues(sb, wildcards) + continue + } + sb.WriteString(fmt.Sprintf("[%v]", + strconv.Quote(fmt.Sprint(w)), + )) + } +} diff --git a/vendor/github.com/PaesslerAG/jsonpath/selector.go b/vendor/github.com/PaesslerAG/jsonpath/selector.go new file mode 100644 index 000000000..46670c249 --- /dev/null +++ b/vendor/github.com/PaesslerAG/jsonpath/selector.go @@ -0,0 +1,203 @@ +package jsonpath + +import ( + "context" + "fmt" + "strconv" + + "github.com/PaesslerAG/gval" +) + +//plainSelector evaluate exactly one result +type plainSelector func(c context.Context, r, v interface{}) (interface{}, error) + +//ambiguousSelector evaluate wildcard +type ambiguousSelector func(c context.Context, r, v interface{}, match ambiguousMatcher) + +//@ +func currentElementSelector() plainSelector { + return func(c context.Context, r, v interface{}) (interface{}, error) { + return c.Value(currentElement{}), nil + } +} + +type currentElement struct{} + +func currentContext(c context.Context, v interface{}) context.Context { + return context.WithValue(c, currentElement{}, v) +} + +//.x, [x] +func 
directSelector(key gval.Evaluable) plainSelector { + return func(c context.Context, r, v interface{}) (interface{}, error) { + + e, _, err := selectValue(c, key, r, v) + if err != nil { + return nil, err + } + + return e, nil + } +} + +// * / [*] +func starSelector() ambiguousSelector { + return func(c context.Context, r, v interface{}, match ambiguousMatcher) { + visitAll(v, func(key string, val interface{}) { match(key, val) }) + } +} + +// [x, ...] +func multiSelector(keys []gval.Evaluable) ambiguousSelector { + if len(keys) == 0 { + return starSelector() + } + return func(c context.Context, r, v interface{}, match ambiguousMatcher) { + for _, k := range keys { + e, wildcard, err := selectValue(c, k, r, v) + if err != nil { + continue + } + match(wildcard, e) + } + } +} + +func selectValue(c context.Context, key gval.Evaluable, r, v interface{}) (value interface{}, jkey string, err error) { + c = currentContext(c, v) + switch o := v.(type) { + case []interface{}: + i, err := key.EvalInt(c, r) + if err != nil { + return nil, "", fmt.Errorf("could not select value, invalid key: %s", err) + } + if i < 0 || i >= len(o) { + return nil, "", fmt.Errorf("index %d out of bounds", i) + } + return o[i], strconv.Itoa(i), nil + case map[string]interface{}: + k, err := key.EvalString(c, r) + if err != nil { + return nil, "", fmt.Errorf("could not select value, invalid key: %s", err) + } + + if r, ok := o[k]; ok { + return r, k, nil + } + return nil, "", fmt.Errorf("unknown key %s", k) + + default: + return nil, "", fmt.Errorf("unsupported value type %T for select, expected map[string]interface{} or []interface{}", o) + } +} + +//.. 
+func mapperSelector() ambiguousSelector { + return mapper +} + +func mapper(c context.Context, r, v interface{}, match ambiguousMatcher) { + match([]interface{}{}, v) + visitAll(v, func(wildcard string, v interface{}) { + mapper(c, r, v, func(key interface{}, v interface{}) { + match(append([]interface{}{wildcard}, key.([]interface{})...), v) + }) + }) +} + +func visitAll(v interface{}, visit func(key string, v interface{})) { + switch v := v.(type) { + case []interface{}: + for i, e := range v { + k := strconv.Itoa(i) + visit(k, e) + } + case map[string]interface{}: + for k, e := range v { + visit(k, e) + } + } + +} + +//[? ] +func filterSelector(filter gval.Evaluable) ambiguousSelector { + return func(c context.Context, r, v interface{}, match ambiguousMatcher) { + visitAll(v, func(wildcard string, v interface{}) { + condition, err := filter.EvalBool(currentContext(c, v), r) + if err != nil { + return + } + if condition { + match(wildcard, v) + } + }) + } +} + +//[::] +func rangeSelector(min, max, step gval.Evaluable) ambiguousSelector { + return func(c context.Context, r, v interface{}, match ambiguousMatcher) { + cs, ok := v.([]interface{}) + if !ok { + return + } + + c = currentContext(c, v) + + min, err := min.EvalInt(c, r) + if err != nil { + return + } + max, err := max.EvalInt(c, r) + if err != nil { + return + } + step, err := step.EvalInt(c, r) + if err != nil { + return + } + + if min > max { + return + } + + n := len(cs) + min = negmax(min, n) + max = negmax(max, n) + + if step == 0 { + step = 1 + } + + if step > 0 { + for i := min; i < max; i += step { + match(strconv.Itoa(i), cs[i]) + } + } else { + for i := max - 1; i >= min; i += step { + match(strconv.Itoa(i), cs[i]) + } + } + + } +} + +func negmax(n, max int) int { + if n < 0 { + n = max + n + if n < 0 { + n = 0 + } + } else if n > max { + return max + } + return n +} + +// () +func newScript(script gval.Evaluable) plainSelector { + return func(c context.Context, r, v interface{}) (interface{}, 
error) { + return script(currentContext(c, v), r) + } +} diff --git a/vendor/github.com/PaesslerAG/jsonpath/test.sh b/vendor/github.com/PaesslerAG/jsonpath/test.sh new file mode 100644 index 000000000..09ae6b98e --- /dev/null +++ b/vendor/github.com/PaesslerAG/jsonpath/test.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +# Script that runs tests, code coverage, and benchmarks all at once. + +JSONPath_PATH=$HOME/gopath/src/github.com/PaesslerAG/jsonpath + +# run the actual tests. +cd "${JSONPath_PATH}" +go test -bench=. -benchmem -coverprofile coverage.out +status=$? + +if [ "${status}" != 0 ]; +then + exit $status +fi diff --git a/vendor/github.com/openshift/library-go/pkg/manifestclient/context.go b/vendor/github.com/openshift/library-go/pkg/manifestclient/context.go new file mode 100644 index 000000000..15298ec18 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/manifestclient/context.go @@ -0,0 +1,18 @@ +package manifestclient + +import ( + "context" +) + +type ctxKey struct{} + +var controllerNameCtxKey = ctxKey{} + +func WithControllerInstanceNameFromContext(ctx context.Context, name string) context.Context { + return context.WithValue(ctx, controllerNameCtxKey, name) +} + +func ControllerInstanceNameFromContext(ctx context.Context) string { + val, _ := ctx.Value(controllerNameCtxKey).(string) + return val +} diff --git a/vendor/github.com/openshift/library-go/pkg/manifestclient/default-discovery/README.md b/vendor/github.com/openshift/library-go/pkg/manifestclient/default-discovery/README.md new file mode 100644 index 000000000..3d8440e89 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/manifestclient/default-discovery/README.md @@ -0,0 +1,2 @@ +Just extracted from a random oc adm inspect. +It beats nothing for now. 
\ No newline at end of file diff --git a/vendor/github.com/openshift/library-go/pkg/manifestclient/default-discovery/aggregated-discovery-api.yaml b/vendor/github.com/openshift/library-go/pkg/manifestclient/default-discovery/aggregated-discovery-api.yaml new file mode 100644 index 000000000..dfe7a756d --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/manifestclient/default-discovery/aggregated-discovery-api.yaml @@ -0,0 +1,493 @@ +apiVersion: apidiscovery.k8s.io/v2 +items: +- metadata: + creationTimestamp: null + versions: + - freshness: Current + resources: + - resource: bindings + responseKind: + group: "" + kind: Binding + version: "" + scope: Namespaced + singularResource: binding + verbs: + - create + - resource: componentstatuses + responseKind: + group: "" + kind: ComponentStatus + version: "" + scope: Cluster + shortNames: + - cs + singularResource: componentstatus + verbs: + - get + - list + - resource: configmaps + responseKind: + group: "" + kind: ConfigMap + version: "" + scope: Namespaced + shortNames: + - cm + singularResource: configmap + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - resource: endpoints + responseKind: + group: "" + kind: Endpoints + version: "" + scope: Namespaced + shortNames: + - ep + singularResource: endpoints + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - resource: events + responseKind: + group: "" + kind: Event + version: "" + scope: Namespaced + shortNames: + - ev + singularResource: event + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - resource: limitranges + responseKind: + group: "" + kind: LimitRange + version: "" + scope: Namespaced + shortNames: + - limits + singularResource: limitrange + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - resource: namespaces + responseKind: + group: "" + 
kind: Namespace + version: "" + scope: Cluster + shortNames: + - ns + singularResource: namespace + subresources: + - responseKind: + group: "" + kind: Namespace + version: "" + subresource: finalize + verbs: + - update + - responseKind: + group: "" + kind: Namespace + version: "" + subresource: status + verbs: + - get + - patch + - update + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - resource: nodes + responseKind: + group: "" + kind: Node + version: "" + scope: Cluster + shortNames: + - "no" + singularResource: node + subresources: + - responseKind: + group: "" + kind: NodeProxyOptions + version: "" + subresource: proxy + verbs: + - create + - delete + - get + - patch + - update + - responseKind: + group: "" + kind: Node + version: "" + subresource: status + verbs: + - get + - patch + - update + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - resource: persistentvolumeclaims + responseKind: + group: "" + kind: PersistentVolumeClaim + version: "" + scope: Namespaced + shortNames: + - pvc + singularResource: persistentvolumeclaim + subresources: + - responseKind: + group: "" + kind: PersistentVolumeClaim + version: "" + subresource: status + verbs: + - get + - patch + - update + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - resource: persistentvolumes + responseKind: + group: "" + kind: PersistentVolume + version: "" + scope: Cluster + shortNames: + - pv + singularResource: persistentvolume + subresources: + - responseKind: + group: "" + kind: PersistentVolume + version: "" + subresource: status + verbs: + - get + - patch + - update + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - categories: + - all + resource: pods + responseKind: + group: "" + kind: Pod + version: "" + scope: Namespaced + shortNames: + - po + singularResource: pod + subresources: + - responseKind: 
+ group: "" + kind: PodAttachOptions + version: "" + subresource: attach + verbs: + - create + - get + - responseKind: + group: "" + kind: Binding + version: "" + subresource: binding + verbs: + - create + - responseKind: + group: "" + kind: Pod + version: "" + subresource: ephemeralcontainers + verbs: + - get + - patch + - update + - responseKind: + group: policy + kind: Eviction + version: v1 + subresource: eviction + verbs: + - create + - responseKind: + group: "" + kind: PodExecOptions + version: "" + subresource: exec + verbs: + - create + - get + - responseKind: + group: "" + kind: Pod + version: "" + subresource: log + verbs: + - get + - responseKind: + group: "" + kind: PodPortForwardOptions + version: "" + subresource: portforward + verbs: + - create + - get + - responseKind: + group: "" + kind: PodProxyOptions + version: "" + subresource: proxy + verbs: + - create + - delete + - get + - patch + - update + - responseKind: + group: "" + kind: Pod + version: "" + subresource: status + verbs: + - get + - patch + - update + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - resource: podtemplates + responseKind: + group: "" + kind: PodTemplate + version: "" + scope: Namespaced + singularResource: podtemplate + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - categories: + - all + resource: replicationcontrollers + responseKind: + group: "" + kind: ReplicationController + version: "" + scope: Namespaced + shortNames: + - rc + singularResource: replicationcontroller + subresources: + - responseKind: + group: autoscaling + kind: Scale + version: v1 + subresource: scale + verbs: + - get + - patch + - update + - responseKind: + group: "" + kind: ReplicationController + version: "" + subresource: status + verbs: + - get + - patch + - update + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - resource: 
resourcequotas + responseKind: + group: "" + kind: ResourceQuota + version: "" + scope: Namespaced + shortNames: + - quota + singularResource: resourcequota + subresources: + - responseKind: + group: "" + kind: ResourceQuota + version: "" + subresource: status + verbs: + - get + - patch + - update + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - resource: secrets + responseKind: + group: "" + kind: Secret + version: "" + scope: Namespaced + singularResource: secret + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - resource: serviceaccounts + responseKind: + group: "" + kind: ServiceAccount + version: "" + scope: Namespaced + shortNames: + - sa + singularResource: serviceaccount + subresources: + - responseKind: + group: authentication.k8s.io + kind: TokenRequest + version: v1 + subresource: token + verbs: + - create + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - categories: + - all + resource: services + responseKind: + group: "" + kind: Service + version: "" + scope: Namespaced + shortNames: + - svc + singularResource: service + subresources: + - responseKind: + group: "" + kind: ServiceProxyOptions + version: "" + subresource: proxy + verbs: + - create + - delete + - get + - patch + - update + - responseKind: + group: "" + kind: Service + version: "" + subresource: status + verbs: + - get + - patch + - update + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + version: v1 +kind: APIGroupDiscoveryList +metadata: {} diff --git a/vendor/github.com/openshift/library-go/pkg/manifestclient/default-discovery/aggregated-discovery-apis.yaml b/vendor/github.com/openshift/library-go/pkg/manifestclient/default-discovery/aggregated-discovery-apis.yaml new file mode 100644 index 000000000..50bb6dfb1 --- /dev/null +++ 
b/vendor/github.com/openshift/library-go/pkg/manifestclient/default-discovery/aggregated-discovery-apis.yaml @@ -0,0 +1,5720 @@ +apiVersion: apidiscovery.k8s.io/v2 +items: +- metadata: + creationTimestamp: null + name: apiregistration.k8s.io + versions: + - freshness: Current + resources: + - categories: + - api-extensions + resource: apiservices + responseKind: + group: "" + kind: APIService + version: "" + scope: Cluster + singularResource: apiservice + subresources: + - responseKind: + group: "" + kind: APIService + version: "" + subresource: status + verbs: + - get + - patch + - update + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: apps + versions: + - freshness: Current + resources: + - resource: controllerrevisions + responseKind: + group: "" + kind: ControllerRevision + version: "" + scope: Namespaced + singularResource: controllerrevision + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - categories: + - all + resource: daemonsets + responseKind: + group: "" + kind: DaemonSet + version: "" + scope: Namespaced + shortNames: + - ds + singularResource: daemonset + subresources: + - responseKind: + group: "" + kind: DaemonSet + version: "" + subresource: status + verbs: + - get + - patch + - update + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - categories: + - all + resource: deployments + responseKind: + group: "" + kind: Deployment + version: "" + scope: Namespaced + shortNames: + - deploy + singularResource: deployment + subresources: + - responseKind: + group: autoscaling + kind: Scale + version: v1 + subresource: scale + verbs: + - get + - patch + - update + - responseKind: + group: "" + kind: Deployment + version: "" + subresource: status + verbs: + - get + - patch + - update + verbs: + - create + - delete + - deletecollection + - get 
+ - list + - patch + - update + - watch + - categories: + - all + resource: replicasets + responseKind: + group: "" + kind: ReplicaSet + version: "" + scope: Namespaced + shortNames: + - rs + singularResource: replicaset + subresources: + - responseKind: + group: autoscaling + kind: Scale + version: v1 + subresource: scale + verbs: + - get + - patch + - update + - responseKind: + group: "" + kind: ReplicaSet + version: "" + subresource: status + verbs: + - get + - patch + - update + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - categories: + - all + resource: statefulsets + responseKind: + group: "" + kind: StatefulSet + version: "" + scope: Namespaced + shortNames: + - sts + singularResource: statefulset + subresources: + - responseKind: + group: autoscaling + kind: Scale + version: v1 + subresource: scale + verbs: + - get + - patch + - update + - responseKind: + group: "" + kind: StatefulSet + version: "" + subresource: status + verbs: + - get + - patch + - update + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: events.k8s.io + versions: + - freshness: Current + resources: + - resource: events + responseKind: + group: "" + kind: Event + version: "" + scope: Namespaced + shortNames: + - ev + singularResource: event + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: authentication.k8s.io + versions: + - freshness: Current + resources: + - resource: selfsubjectreviews + responseKind: + group: "" + kind: SelfSubjectReview + version: "" + scope: Cluster + singularResource: selfsubjectreview + verbs: + - create + - resource: tokenreviews + responseKind: + group: "" + kind: TokenReview + version: "" + scope: Cluster + singularResource: tokenreview + verbs: + - create + version: v1 +- metadata: + 
creationTimestamp: null + name: authorization.k8s.io + versions: + - freshness: Current + resources: + - resource: localsubjectaccessreviews + responseKind: + group: "" + kind: LocalSubjectAccessReview + version: "" + scope: Namespaced + singularResource: localsubjectaccessreview + verbs: + - create + - resource: selfsubjectaccessreviews + responseKind: + group: "" + kind: SelfSubjectAccessReview + version: "" + scope: Cluster + singularResource: selfsubjectaccessreview + verbs: + - create + - resource: selfsubjectrulesreviews + responseKind: + group: "" + kind: SelfSubjectRulesReview + version: "" + scope: Cluster + singularResource: selfsubjectrulesreview + verbs: + - create + - resource: subjectaccessreviews + responseKind: + group: "" + kind: SubjectAccessReview + version: "" + scope: Cluster + singularResource: subjectaccessreview + verbs: + - create + version: v1 +- metadata: + creationTimestamp: null + name: autoscaling + versions: + - freshness: Current + resources: + - categories: + - all + resource: horizontalpodautoscalers + responseKind: + group: "" + kind: HorizontalPodAutoscaler + version: "" + scope: Namespaced + shortNames: + - hpa + singularResource: horizontalpodautoscaler + subresources: + - responseKind: + group: "" + kind: HorizontalPodAutoscaler + version: "" + subresource: status + verbs: + - get + - patch + - update + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + version: v2 + - freshness: Current + resources: + - categories: + - all + resource: horizontalpodautoscalers + responseKind: + group: "" + kind: HorizontalPodAutoscaler + version: "" + scope: Namespaced + shortNames: + - hpa + singularResource: horizontalpodautoscaler + subresources: + - responseKind: + group: "" + kind: HorizontalPodAutoscaler + version: "" + subresource: status + verbs: + - get + - patch + - update + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + version: v1 
+- metadata: + creationTimestamp: null + name: batch + versions: + - freshness: Current + resources: + - categories: + - all + resource: cronjobs + responseKind: + group: "" + kind: CronJob + version: "" + scope: Namespaced + shortNames: + - cj + singularResource: cronjob + subresources: + - responseKind: + group: "" + kind: CronJob + version: "" + subresource: status + verbs: + - get + - patch + - update + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - categories: + - all + resource: jobs + responseKind: + group: "" + kind: Job + version: "" + scope: Namespaced + singularResource: job + subresources: + - responseKind: + group: "" + kind: Job + version: "" + subresource: status + verbs: + - get + - patch + - update + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: certificates.k8s.io + versions: + - freshness: Current + resources: + - resource: certificatesigningrequests + responseKind: + group: "" + kind: CertificateSigningRequest + version: "" + scope: Cluster + shortNames: + - csr + singularResource: certificatesigningrequest + subresources: + - responseKind: + group: "" + kind: CertificateSigningRequest + version: "" + subresource: approval + verbs: + - get + - patch + - update + - responseKind: + group: "" + kind: CertificateSigningRequest + version: "" + subresource: status + verbs: + - get + - patch + - update + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: networking.k8s.io + versions: + - freshness: Current + resources: + - resource: ingressclasses + responseKind: + group: "" + kind: IngressClass + version: "" + scope: Cluster + singularResource: ingressclass + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - resource: ingresses + 
responseKind: + group: "" + kind: Ingress + version: "" + scope: Namespaced + shortNames: + - ing + singularResource: ingress + subresources: + - responseKind: + group: "" + kind: Ingress + version: "" + subresource: status + verbs: + - get + - patch + - update + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - resource: networkpolicies + responseKind: + group: "" + kind: NetworkPolicy + version: "" + scope: Namespaced + shortNames: + - netpol + singularResource: networkpolicy + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: policy + versions: + - freshness: Current + resources: + - resource: poddisruptionbudgets + responseKind: + group: "" + kind: PodDisruptionBudget + version: "" + scope: Namespaced + shortNames: + - pdb + singularResource: poddisruptionbudget + subresources: + - responseKind: + group: "" + kind: PodDisruptionBudget + version: "" + subresource: status + verbs: + - get + - patch + - update + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: rbac.authorization.k8s.io + versions: + - freshness: Current + resources: + - resource: clusterrolebindings + responseKind: + group: "" + kind: ClusterRoleBinding + version: "" + scope: Cluster + singularResource: clusterrolebinding + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - resource: clusterroles + responseKind: + group: "" + kind: ClusterRole + version: "" + scope: Cluster + singularResource: clusterrole + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - resource: rolebindings + responseKind: + group: "" + kind: RoleBinding + version: "" + scope: Namespaced + singularResource: rolebinding + verbs: + - create + - delete + - 
deletecollection + - get + - list + - patch + - update + - watch + - resource: roles + responseKind: + group: "" + kind: Role + version: "" + scope: Namespaced + singularResource: role + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: storage.k8s.io + versions: + - freshness: Current + resources: + - resource: csidrivers + responseKind: + group: "" + kind: CSIDriver + version: "" + scope: Cluster + singularResource: csidriver + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - resource: csinodes + responseKind: + group: "" + kind: CSINode + version: "" + scope: Cluster + singularResource: csinode + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - resource: csistoragecapacities + responseKind: + group: "" + kind: CSIStorageCapacity + version: "" + scope: Namespaced + singularResource: csistoragecapacity + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - resource: storageclasses + responseKind: + group: "" + kind: StorageClass + version: "" + scope: Cluster + shortNames: + - sc + singularResource: storageclass + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - resource: volumeattachments + responseKind: + group: "" + kind: VolumeAttachment + version: "" + scope: Cluster + singularResource: volumeattachment + subresources: + - responseKind: + group: "" + kind: VolumeAttachment + version: "" + subresource: status + verbs: + - get + - patch + - update + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: admissionregistration.k8s.io + versions: + - freshness: Current + resources: + - categories: + - api-extensions + resource: 
mutatingwebhookconfigurations + responseKind: + group: "" + kind: MutatingWebhookConfiguration + version: "" + scope: Cluster + singularResource: mutatingwebhookconfiguration + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - categories: + - api-extensions + resource: validatingadmissionpolicies + responseKind: + group: "" + kind: ValidatingAdmissionPolicy + version: "" + scope: Cluster + singularResource: validatingadmissionpolicy + subresources: + - responseKind: + group: "" + kind: ValidatingAdmissionPolicy + version: "" + subresource: status + verbs: + - get + - patch + - update + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - categories: + - api-extensions + resource: validatingadmissionpolicybindings + responseKind: + group: "" + kind: ValidatingAdmissionPolicyBinding + version: "" + scope: Cluster + singularResource: validatingadmissionpolicybinding + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - categories: + - api-extensions + resource: validatingwebhookconfigurations + responseKind: + group: "" + kind: ValidatingWebhookConfiguration + version: "" + scope: Cluster + singularResource: validatingwebhookconfiguration + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + version: v1 + - freshness: Current + resources: + - categories: + - api-extensions + resource: validatingadmissionpolicies + responseKind: + group: "" + kind: ValidatingAdmissionPolicy + version: "" + scope: Cluster + singularResource: validatingadmissionpolicy + subresources: + - responseKind: + group: "" + kind: ValidatingAdmissionPolicy + version: "" + subresource: status + verbs: + - get + - patch + - update + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - categories: + - api-extensions + resource: 
validatingadmissionpolicybindings + responseKind: + group: "" + kind: ValidatingAdmissionPolicyBinding + version: "" + scope: Cluster + singularResource: validatingadmissionpolicybinding + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + version: v1beta1 +- metadata: + creationTimestamp: null + name: apiextensions.k8s.io + versions: + - freshness: Current + resources: + - categories: + - api-extensions + resource: customresourcedefinitions + responseKind: + group: "" + kind: CustomResourceDefinition + version: "" + scope: Cluster + shortNames: + - crd + - crds + singularResource: customresourcedefinition + subresources: + - responseKind: + group: "" + kind: CustomResourceDefinition + version: "" + subresource: status + verbs: + - get + - patch + - update + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: scheduling.k8s.io + versions: + - freshness: Current + resources: + - resource: priorityclasses + responseKind: + group: "" + kind: PriorityClass + version: "" + scope: Cluster + shortNames: + - pc + singularResource: priorityclass + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: coordination.k8s.io + versions: + - freshness: Current + resources: + - resource: leases + responseKind: + group: "" + kind: Lease + version: "" + scope: Namespaced + singularResource: lease + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: node.k8s.io + versions: + - freshness: Current + resources: + - resource: runtimeclasses + responseKind: + group: "" + kind: RuntimeClass + version: "" + scope: Cluster + singularResource: runtimeclass + verbs: + - create + - delete + - deletecollection + - get + - list + - 
patch + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: discovery.k8s.io + versions: + - freshness: Current + resources: + - resource: endpointslices + responseKind: + group: "" + kind: EndpointSlice + version: "" + scope: Namespaced + singularResource: endpointslice + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: flowcontrol.apiserver.k8s.io + versions: + - freshness: Current + resources: + - resource: flowschemas + responseKind: + group: "" + kind: FlowSchema + version: "" + scope: Cluster + singularResource: flowschema + subresources: + - responseKind: + group: "" + kind: FlowSchema + version: "" + subresource: status + verbs: + - get + - patch + - update + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - resource: prioritylevelconfigurations + responseKind: + group: "" + kind: PriorityLevelConfiguration + version: "" + scope: Cluster + singularResource: prioritylevelconfiguration + subresources: + - responseKind: + group: "" + kind: PriorityLevelConfiguration + version: "" + subresource: status + verbs: + - get + - patch + - update + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + version: v1 + - freshness: Current + resources: + - resource: flowschemas + responseKind: + group: "" + kind: FlowSchema + version: "" + scope: Cluster + singularResource: flowschema + subresources: + - responseKind: + group: "" + kind: FlowSchema + version: "" + subresource: status + verbs: + - get + - patch + - update + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - resource: prioritylevelconfigurations + responseKind: + group: "" + kind: PriorityLevelConfiguration + version: "" + scope: Cluster + singularResource: prioritylevelconfiguration + subresources: + - responseKind: + group: 
"" + kind: PriorityLevelConfiguration + version: "" + subresource: status + verbs: + - get + - patch + - update + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + version: v1beta3 +- metadata: + creationTimestamp: null + name: apps.openshift.io + versions: + - freshness: Current + resources: + - categories: + - all + resource: deploymentconfigs + responseKind: + group: "" + kind: DeploymentConfig + version: "" + scope: Namespaced + shortNames: + - dc + singularResource: deploymentconfig + subresources: + - responseKind: + group: "" + kind: DeploymentRequest + version: "" + subresource: instantiate + verbs: + - create + - responseKind: + group: "" + kind: DeploymentLog + version: "" + subresource: log + verbs: + - get + - responseKind: + group: "" + kind: DeploymentConfigRollback + version: "" + subresource: rollback + verbs: + - create + - responseKind: + group: extensions + kind: Scale + version: v1beta1 + subresource: scale + verbs: + - get + - patch + - update + - responseKind: + group: "" + kind: DeploymentConfig + version: "" + subresource: status + verbs: + - get + - patch + - update + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: authorization.openshift.io + versions: + - freshness: Current + resources: + - resource: clusterrolebindings + responseKind: + group: "" + kind: ClusterRoleBinding + version: "" + scope: Cluster + singularResource: clusterrolebinding + verbs: + - create + - delete + - get + - list + - patch + - update + - resource: clusterroles + responseKind: + group: "" + kind: ClusterRole + version: "" + scope: Cluster + singularResource: clusterrole + verbs: + - create + - delete + - get + - list + - patch + - update + - resource: localresourceaccessreviews + responseKind: + group: "" + kind: LocalResourceAccessReview + version: "" + scope: Namespaced + singularResource: 
localresourceaccessreview + verbs: + - create + - resource: localsubjectaccessreviews + responseKind: + group: "" + kind: LocalSubjectAccessReview + version: "" + scope: Namespaced + singularResource: localsubjectaccessreview + verbs: + - create + - resource: resourceaccessreviews + responseKind: + group: "" + kind: ResourceAccessReview + version: "" + scope: Cluster + singularResource: localresourceaccessreview + verbs: + - create + - resource: rolebindingrestrictions + responseKind: + group: "" + kind: RoleBindingRestriction + version: "" + scope: Namespaced + singularResource: rolebindingrestriction + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - resource: rolebindings + responseKind: + group: "" + kind: RoleBinding + version: "" + scope: Namespaced + singularResource: rolebinding + verbs: + - create + - delete + - get + - list + - patch + - update + - resource: roles + responseKind: + group: "" + kind: Role + version: "" + scope: Namespaced + singularResource: role + verbs: + - create + - delete + - get + - list + - patch + - update + - resource: selfsubjectrulesreviews + responseKind: + group: "" + kind: SelfSubjectRulesReview + version: "" + scope: Namespaced + singularResource: selfsubjectrulesreview + verbs: + - create + - resource: subjectaccessreviews + responseKind: + group: "" + kind: SubjectAccessReview + version: "" + scope: Cluster + singularResource: subjectaccessreview + verbs: + - create + - resource: subjectrulesreviews + responseKind: + group: "" + kind: SubjectRulesReview + version: "" + scope: Namespaced + singularResource: subjectrulesreview + verbs: + - create + version: v1 +- metadata: + creationTimestamp: null + name: build.openshift.io + versions: + - freshness: Current + resources: + - categories: + - all + resource: buildconfigs + responseKind: + group: "" + kind: BuildConfig + version: "" + scope: Namespaced + shortNames: + - bc + singularResource: buildconfig + subresources: + - 
responseKind: + group: "" + kind: BuildRequest + version: "" + subresource: instantiate + verbs: + - create + - responseKind: + group: "" + kind: BinaryBuildRequestOptions + version: "" + subresource: instantiatebinary + verbs: + - create + - responseKind: + group: "" + kind: Build + version: "" + subresource: webhooks + verbs: + - create + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - categories: + - all + resource: builds + responseKind: + group: "" + kind: Build + version: "" + scope: Namespaced + singularResource: build + subresources: + - responseKind: + group: "" + kind: BuildRequest + version: "" + subresource: clone + verbs: + - create + - responseKind: + group: "" + kind: Build + version: "" + subresource: details + verbs: + - update + - responseKind: + group: "" + kind: BuildLog + version: "" + subresource: log + verbs: + - get + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: image.openshift.io + versions: + - freshness: Current + resources: + - resource: images + responseKind: + group: "" + kind: Image + version: "" + scope: Cluster + singularResource: image + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - resource: imagesignatures + responseKind: + group: "" + kind: ImageSignature + version: "" + scope: Cluster + singularResource: imagesignature + verbs: + - create + - delete + - resource: imagestreamimages + responseKind: + group: "" + kind: ImageStreamImage + version: "" + scope: Namespaced + shortNames: + - isimage + singularResource: imagestreamimage + verbs: + - get + - resource: imagestreamimports + responseKind: + group: "" + kind: ImageStreamImport + version: "" + scope: Namespaced + singularResource: imagestreamimport + verbs: + - create + - resource: imagestreammappings + responseKind: + group: "" + kind: ImageStreamMapping 
+ version: "" + scope: Namespaced + singularResource: imagestreammapping + verbs: + - create + - categories: + - all + resource: imagestreams + responseKind: + group: "" + kind: ImageStream + version: "" + scope: Namespaced + shortNames: + - is + singularResource: imagestream + subresources: + - responseKind: + group: "" + kind: ImageStreamLayers + version: "" + subresource: layers + verbs: + - get + - responseKind: + group: "" + kind: SecretList + version: "" + subresource: secrets + verbs: + - get + - responseKind: + group: "" + kind: ImageStream + version: "" + subresource: status + verbs: + - get + - patch + - update + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - resource: imagestreamtags + responseKind: + group: "" + kind: ImageStreamTag + version: "" + scope: Namespaced + shortNames: + - istag + singularResource: imagestreamtag + verbs: + - create + - delete + - get + - list + - patch + - update + - resource: imagetags + responseKind: + group: "" + kind: ImageTag + version: "" + scope: Namespaced + shortNames: + - itag + singularResource: imagetag + verbs: + - create + - delete + - get + - list + - patch + - update + version: v1 +- metadata: + creationTimestamp: null + name: oauth.openshift.io + versions: + - freshness: Current + resources: + - resource: oauthaccesstokens + responseKind: + group: "" + kind: OAuthAccessToken + version: "" + scope: Cluster + singularResource: oauthaccesstoken + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - resource: oauthauthorizetokens + responseKind: + group: "" + kind: OAuthAuthorizeToken + version: "" + scope: Cluster + singularResource: oauthauthorizetoken + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - resource: oauthclientauthorizations + responseKind: + group: "" + kind: OAuthClientAuthorization + version: "" + scope: Cluster + singularResource: 
oauthclientauthorization + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - resource: oauthclients + responseKind: + group: "" + kind: OAuthClient + version: "" + scope: Cluster + singularResource: oauthclient + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - resource: tokenreviews + responseKind: + group: authentication.k8s.io + kind: TokenReview + version: v1 + scope: Cluster + singularResource: tokenreview + verbs: + - create + - resource: useroauthaccesstokens + responseKind: + group: "" + kind: UserOAuthAccessToken + version: "" + scope: Cluster + singularResource: useroauthaccesstoken + verbs: + - delete + - get + - list + - watch + version: v1 +- metadata: + creationTimestamp: null + name: project.openshift.io + versions: + - freshness: Current + resources: + - resource: projectrequests + responseKind: + group: "" + kind: ProjectRequest + version: "" + scope: Cluster + singularResource: projectrequest + verbs: + - create + - list + - resource: projects + responseKind: + group: "" + kind: Project + version: "" + scope: Cluster + singularResource: project + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: quota.openshift.io + versions: + - freshness: Current + resources: + - resource: appliedclusterresourcequotas + responseKind: + group: "" + kind: AppliedClusterResourceQuota + version: "" + scope: Namespaced + singularResource: appliedclusterresourcequota + verbs: + - get + - list + - resource: clusterresourcequotas + responseKind: + group: "" + kind: ClusterResourceQuota + version: "" + scope: Cluster + shortNames: + - clusterquota + singularResource: clusterresourcequota + subresources: + - responseKind: + group: "" + kind: ClusterResourceQuota + version: "" + subresource: status + verbs: + - get + - patch + - update + verbs: + - create + - delete + - 
deletecollection + - get + - list + - patch + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: route.openshift.io + versions: + - freshness: Current + resources: + - categories: + - all + resource: routes + responseKind: + group: "" + kind: Route + version: "" + scope: Namespaced + singularResource: route + subresources: + - responseKind: + group: "" + kind: Route + version: "" + subresource: status + verbs: + - get + - patch + - update + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: security.openshift.io + versions: + - freshness: Current + resources: + - resource: podsecuritypolicyreviews + responseKind: + group: "" + kind: PodSecurityPolicyReview + version: "" + scope: Namespaced + singularResource: podsecuritypolicyreview + verbs: + - create + - resource: podsecuritypolicyselfsubjectreviews + responseKind: + group: "" + kind: PodSecurityPolicySelfSubjectReview + version: "" + scope: Namespaced + singularResource: podsecuritypolicyselfsubjectreview + verbs: + - create + - resource: podsecuritypolicysubjectreviews + responseKind: + group: "" + kind: PodSecurityPolicySubjectReview + version: "" + scope: Namespaced + singularResource: podsecuritypolicysubjectreview + verbs: + - create + - resource: rangeallocations + responseKind: + group: "" + kind: RangeAllocation + version: "" + scope: Cluster + singularResource: rangeallocation + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - resource: securitycontextconstraints + responseKind: + group: "" + kind: SecurityContextConstraints + version: "" + scope: Cluster + shortNames: + - scc + singularResource: securitycontextconstraint + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: template.openshift.io + versions: + - 
freshness: Current + resources: + - resource: brokertemplateinstances + responseKind: + group: "" + kind: BrokerTemplateInstance + version: "" + scope: Cluster + singularResource: brokertemplateinstance + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - resource: processedtemplates + responseKind: + group: "" + kind: Template + version: "" + scope: Namespaced + singularResource: processedtemplate + verbs: + - create + - resource: templateinstances + responseKind: + group: "" + kind: TemplateInstance + version: "" + scope: Namespaced + singularResource: templateinstance + subresources: + - responseKind: + group: "" + kind: TemplateInstance + version: "" + subresource: status + verbs: + - get + - patch + - update + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - resource: templates + responseKind: + group: "" + kind: Template + version: "" + scope: Namespaced + singularResource: template + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: user.openshift.io + versions: + - freshness: Current + resources: + - resource: groups + responseKind: + group: "" + kind: Group + version: "" + scope: Cluster + singularResource: group + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - resource: identities + responseKind: + group: "" + kind: Identity + version: "" + scope: Cluster + singularResource: identity + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + - resource: useridentitymappings + responseKind: + group: "" + kind: UserIdentityMapping + version: "" + scope: Cluster + singularResource: useridentitymapping + verbs: + - create + - delete + - get + - patch + - update + - resource: users + responseKind: + group: "" + kind: User + version: "" + scope: Cluster + 
singularResource: user + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: packages.operators.coreos.com + versions: + - freshness: Current + resources: + - resource: packagemanifests + responseKind: + group: "" + kind: PackageManifest + version: "" + scope: Namespaced + singularResource: packagemanifest + subresources: + - responseKind: + group: "" + kind: PackageManifest + version: "" + subresource: icon + verbs: + - get + verbs: + - get + - list + version: v1 +- metadata: + creationTimestamp: null + name: apiserver.openshift.io + versions: + - freshness: Current + resources: + - resource: apirequestcounts + responseKind: + group: apiserver.openshift.io + kind: APIRequestCount + version: v1 + scope: Cluster + singularResource: apirequestcount + subresources: + - responseKind: + group: apiserver.openshift.io + kind: APIRequestCount + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: autoscaling.openshift.io + versions: + - freshness: Current + resources: + - resource: clusterautoscalers + responseKind: + group: autoscaling.openshift.io + kind: ClusterAutoscaler + version: v1 + scope: Cluster + shortNames: + - ca + singularResource: clusterautoscaler + subresources: + - responseKind: + group: autoscaling.openshift.io + kind: ClusterAutoscaler + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1 + - freshness: Current + resources: + - resource: machineautoscalers + responseKind: + group: autoscaling.openshift.io + kind: MachineAutoscaler + version: v1beta1 + scope: Namespaced + shortNames: + - ma + singularResource: machineautoscaler + 
subresources: + - responseKind: + group: autoscaling.openshift.io + kind: MachineAutoscaler + version: v1beta1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1beta1 +- metadata: + creationTimestamp: null + name: autoscalinge2e.example.com + versions: + - freshness: Current + resources: + - resource: testcrds + responseKind: + group: autoscalinge2e.example.com + kind: TestCRD + version: v1 + scope: Namespaced + singularResource: testcrd + subresources: + - responseKind: + group: autoscaling + kind: Scale + version: v1 + subresource: scale + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: cloudcredential.openshift.io + versions: + - freshness: Current + resources: + - resource: credentialsrequests + responseKind: + group: cloudcredential.openshift.io + kind: CredentialsRequest + version: v1 + scope: Namespaced + singularResource: credentialsrequest + subresources: + - responseKind: + group: cloudcredential.openshift.io + kind: CredentialsRequest + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: config.openshift.io + versions: + - freshness: Current + resources: + - resource: apiservers + responseKind: + group: config.openshift.io + kind: APIServer + version: v1 + scope: Cluster + singularResource: apiserver + subresources: + - responseKind: + group: config.openshift.io + kind: APIServer + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: authentications + responseKind: + group: 
config.openshift.io + kind: Authentication + version: v1 + scope: Cluster + singularResource: authentication + subresources: + - responseKind: + group: config.openshift.io + kind: Authentication + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: builds + responseKind: + group: config.openshift.io + kind: Build + version: v1 + scope: Cluster + singularResource: build + subresources: + - responseKind: + group: config.openshift.io + kind: Build + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: clusteroperators + responseKind: + group: config.openshift.io + kind: ClusterOperator + version: v1 + scope: Cluster + shortNames: + - co + singularResource: clusteroperator + subresources: + - responseKind: + group: config.openshift.io + kind: ClusterOperator + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: clusterversions + responseKind: + group: config.openshift.io + kind: ClusterVersion + version: v1 + scope: Cluster + singularResource: clusterversion + subresources: + - responseKind: + group: config.openshift.io + kind: ClusterVersion + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: consoles + responseKind: + group: config.openshift.io + kind: Console + version: v1 + scope: Cluster + singularResource: console + subresources: + - responseKind: + group: config.openshift.io + kind: Console + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - 
create + - update + - watch + - resource: dnses + responseKind: + group: config.openshift.io + kind: DNS + version: v1 + scope: Cluster + singularResource: dns + subresources: + - responseKind: + group: config.openshift.io + kind: DNS + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: featuregates + responseKind: + group: config.openshift.io + kind: FeatureGate + version: v1 + scope: Cluster + singularResource: featuregate + subresources: + - responseKind: + group: config.openshift.io + kind: FeatureGate + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: imagecontentpolicies + responseKind: + group: config.openshift.io + kind: ImageContentPolicy + version: v1 + scope: Cluster + singularResource: imagecontentpolicy + subresources: + - responseKind: + group: config.openshift.io + kind: ImageContentPolicy + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: imagedigestmirrorsets + responseKind: + group: config.openshift.io + kind: ImageDigestMirrorSet + version: v1 + scope: Cluster + shortNames: + - idms + singularResource: imagedigestmirrorset + subresources: + - responseKind: + group: config.openshift.io + kind: ImageDigestMirrorSet + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: images + responseKind: + group: config.openshift.io + kind: Image + version: v1 + scope: Cluster + singularResource: image + subresources: + - responseKind: + group: config.openshift.io + kind: Image + version: v1 + subresource: status + verbs: + - get 
+ - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: imagetagmirrorsets + responseKind: + group: config.openshift.io + kind: ImageTagMirrorSet + version: v1 + scope: Cluster + shortNames: + - itms + singularResource: imagetagmirrorset + subresources: + - responseKind: + group: config.openshift.io + kind: ImageTagMirrorSet + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: infrastructures + responseKind: + group: config.openshift.io + kind: Infrastructure + version: v1 + scope: Cluster + singularResource: infrastructure + subresources: + - responseKind: + group: config.openshift.io + kind: Infrastructure + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: ingresses + responseKind: + group: config.openshift.io + kind: Ingress + version: v1 + scope: Cluster + singularResource: ingress + subresources: + - responseKind: + group: config.openshift.io + kind: Ingress + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: networks + responseKind: + group: config.openshift.io + kind: Network + version: v1 + scope: Cluster + singularResource: network + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: nodes + responseKind: + group: config.openshift.io + kind: Node + version: v1 + scope: Cluster + singularResource: node + subresources: + - responseKind: + group: config.openshift.io + kind: Node + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - 
create + - update + - watch + - resource: oauths + responseKind: + group: config.openshift.io + kind: OAuth + version: v1 + scope: Cluster + singularResource: oauth + subresources: + - responseKind: + group: config.openshift.io + kind: OAuth + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: operatorhubs + responseKind: + group: config.openshift.io + kind: OperatorHub + version: v1 + scope: Cluster + singularResource: operatorhub + subresources: + - responseKind: + group: config.openshift.io + kind: OperatorHub + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: projects + responseKind: + group: config.openshift.io + kind: Project + version: v1 + scope: Cluster + singularResource: project + subresources: + - responseKind: + group: config.openshift.io + kind: Project + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: proxies + responseKind: + group: config.openshift.io + kind: Proxy + version: v1 + scope: Cluster + singularResource: proxy + subresources: + - responseKind: + group: config.openshift.io + kind: Proxy + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: schedulers + responseKind: + group: config.openshift.io + kind: Scheduler + version: v1 + scope: Cluster + singularResource: scheduler + subresources: + - responseKind: + group: config.openshift.io + kind: Scheduler + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - 
update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: console.openshift.io + versions: + - freshness: Current + resources: + - resource: consoleclidownloads + responseKind: + group: console.openshift.io + kind: ConsoleCLIDownload + version: v1 + scope: Cluster + singularResource: consoleclidownload + subresources: + - responseKind: + group: console.openshift.io + kind: ConsoleCLIDownload + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: consoleexternalloglinks + responseKind: + group: console.openshift.io + kind: ConsoleExternalLogLink + version: v1 + scope: Cluster + singularResource: consoleexternalloglink + subresources: + - responseKind: + group: console.openshift.io + kind: ConsoleExternalLogLink + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: consolelinks + responseKind: + group: console.openshift.io + kind: ConsoleLink + version: v1 + scope: Cluster + singularResource: consolelink + subresources: + - responseKind: + group: console.openshift.io + kind: ConsoleLink + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: consolenotifications + responseKind: + group: console.openshift.io + kind: ConsoleNotification + version: v1 + scope: Cluster + singularResource: consolenotification + subresources: + - responseKind: + group: console.openshift.io + kind: ConsoleNotification + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: consoleplugins + responseKind: + group: console.openshift.io + kind: 
ConsolePlugin + version: v1 + scope: Cluster + singularResource: consoleplugin + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: consolequickstarts + responseKind: + group: console.openshift.io + kind: ConsoleQuickStart + version: v1 + scope: Cluster + singularResource: consolequickstart + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: consolesamples + responseKind: + group: console.openshift.io + kind: ConsoleSample + version: v1 + scope: Cluster + singularResource: consolesample + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: consoleyamlsamples + responseKind: + group: console.openshift.io + kind: ConsoleYAMLSample + version: v1 + scope: Cluster + singularResource: consoleyamlsample + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1 + - freshness: Current + resources: + - resource: consoleplugins + responseKind: + group: console.openshift.io + kind: ConsolePlugin + version: v1alpha1 + scope: Cluster + singularResource: consoleplugin + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1alpha1 +- metadata: + creationTimestamp: null + name: controlplane.operator.openshift.io + versions: + - freshness: Current + resources: + - resource: podnetworkconnectivitychecks + responseKind: + group: controlplane.operator.openshift.io + kind: PodNetworkConnectivityCheck + version: v1alpha1 + scope: Namespaced + singularResource: podnetworkconnectivitycheck + subresources: + - responseKind: + group: controlplane.operator.openshift.io + kind: PodNetworkConnectivityCheck + version: v1alpha1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1alpha1 
+- metadata: + creationTimestamp: null + name: helm.openshift.io + versions: + - freshness: Current + resources: + - resource: helmchartrepositories + responseKind: + group: helm.openshift.io + kind: HelmChartRepository + version: v1beta1 + scope: Cluster + singularResource: helmchartrepository + subresources: + - responseKind: + group: helm.openshift.io + kind: HelmChartRepository + version: v1beta1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: projecthelmchartrepositories + responseKind: + group: helm.openshift.io + kind: ProjectHelmChartRepository + version: v1beta1 + scope: Namespaced + singularResource: projecthelmchartrepository + subresources: + - responseKind: + group: helm.openshift.io + kind: ProjectHelmChartRepository + version: v1beta1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1beta1 +- metadata: + creationTimestamp: null + name: imageregistry.operator.openshift.io + versions: + - freshness: Current + resources: + - resource: configs + responseKind: + group: imageregistry.operator.openshift.io + kind: Config + version: v1 + scope: Cluster + singularResource: config + subresources: + - responseKind: + group: imageregistry.operator.openshift.io + kind: Config + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: imagepruners + responseKind: + group: imageregistry.operator.openshift.io + kind: ImagePruner + version: v1 + scope: Cluster + singularResource: imagepruner + subresources: + - responseKind: + group: imageregistry.operator.openshift.io + kind: ImagePruner + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - 
deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: infrastructure.cluster.x-k8s.io + versions: + - freshness: Current + resources: + - categories: + - cluster-api + resource: metal3remediations + responseKind: + group: infrastructure.cluster.x-k8s.io + kind: Metal3Remediation + version: v1beta1 + scope: Namespaced + shortNames: + - m3r + - m3remediation + singularResource: metal3remediation + subresources: + - responseKind: + group: infrastructure.cluster.x-k8s.io + kind: Metal3Remediation + version: v1beta1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - categories: + - cluster-api + resource: metal3remediationtemplates + responseKind: + group: infrastructure.cluster.x-k8s.io + kind: Metal3RemediationTemplate + version: v1beta1 + scope: Namespaced + shortNames: + - m3rt + - m3remediationtemplate + - m3remediationtemplates + - metal3rt + - metal3remediationtemplate + singularResource: metal3remediationtemplate + subresources: + - responseKind: + group: infrastructure.cluster.x-k8s.io + kind: Metal3RemediationTemplate + version: v1beta1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1beta1 + - freshness: Current + resources: + - categories: + - cluster-api + resource: metal3remediations + responseKind: + group: infrastructure.cluster.x-k8s.io + kind: Metal3Remediation + version: v1alpha5 + scope: Namespaced + shortNames: + - m3r + - m3remediation + singularResource: metal3remediation + subresources: + - responseKind: + group: infrastructure.cluster.x-k8s.io + kind: Metal3Remediation + version: v1alpha5 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - 
update + - watch + - categories: + - cluster-api + resource: metal3remediationtemplates + responseKind: + group: infrastructure.cluster.x-k8s.io + kind: Metal3RemediationTemplate + version: v1alpha5 + scope: Namespaced + shortNames: + - m3rt + - m3remediationtemplate + - m3remediationtemplates + - metal3rt + - metal3remediationtemplate + singularResource: metal3remediationtemplate + subresources: + - responseKind: + group: infrastructure.cluster.x-k8s.io + kind: Metal3RemediationTemplate + version: v1alpha5 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1alpha5 +- metadata: + creationTimestamp: null + name: ingress.operator.openshift.io + versions: + - freshness: Current + resources: + - resource: dnsrecords + responseKind: + group: ingress.operator.openshift.io + kind: DNSRecord + version: v1 + scope: Namespaced + singularResource: dnsrecord + subresources: + - responseKind: + group: ingress.operator.openshift.io + kind: DNSRecord + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: ipam.cluster.x-k8s.io + versions: + - freshness: Current + resources: + - categories: + - cluster-api + resource: ipaddressclaims + responseKind: + group: ipam.cluster.x-k8s.io + kind: IPAddressClaim + version: v1beta1 + scope: Namespaced + singularResource: ipaddressclaim + subresources: + - responseKind: + group: ipam.cluster.x-k8s.io + kind: IPAddressClaim + version: v1beta1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - categories: + - cluster-api + resource: ipaddresses + responseKind: + group: ipam.cluster.x-k8s.io + kind: IPAddress + version: v1beta1 + scope: 
Namespaced + singularResource: ipaddress + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1beta1 + - freshness: Current + resources: + - categories: + - cluster-api + resource: ipaddressclaims + responseKind: + group: ipam.cluster.x-k8s.io + kind: IPAddressClaim + version: v1alpha1 + scope: Namespaced + singularResource: ipaddressclaim + subresources: + - responseKind: + group: ipam.cluster.x-k8s.io + kind: IPAddressClaim + version: v1alpha1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - categories: + - cluster-api + resource: ipaddresses + responseKind: + group: ipam.cluster.x-k8s.io + kind: IPAddress + version: v1alpha1 + scope: Namespaced + singularResource: ipaddress + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1alpha1 +- metadata: + creationTimestamp: null + name: k8s.cni.cncf.io + versions: + - freshness: Current + resources: + - resource: network-attachment-definitions + responseKind: + group: k8s.cni.cncf.io + kind: NetworkAttachmentDefinition + version: v1 + scope: Namespaced + shortNames: + - net-attach-def + singularResource: network-attachment-definition + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: k8s.ovn.org + versions: + - freshness: Current + resources: + - resource: adminpolicybasedexternalroutes + responseKind: + group: k8s.ovn.org + kind: AdminPolicyBasedExternalRoute + version: v1 + scope: Cluster + shortNames: + - apbexternalroute + singularResource: adminpolicybasedexternalroute + subresources: + - responseKind: + group: k8s.ovn.org + kind: AdminPolicyBasedExternalRoute + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - 
get + - list + - patch + - create + - update + - watch + - resource: egressfirewalls + responseKind: + group: k8s.ovn.org + kind: EgressFirewall + version: v1 + scope: Namespaced + singularResource: egressfirewall + subresources: + - responseKind: + group: k8s.ovn.org + kind: EgressFirewall + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: egressips + responseKind: + group: k8s.ovn.org + kind: EgressIP + version: v1 + scope: Cluster + shortNames: + - eip + singularResource: egressip + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: egressqoses + responseKind: + group: k8s.ovn.org + kind: EgressQoS + version: v1 + scope: Namespaced + singularResource: egressqos + subresources: + - responseKind: + group: k8s.ovn.org + kind: EgressQoS + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: egressservices + responseKind: + group: k8s.ovn.org + kind: EgressService + version: v1 + scope: Namespaced + singularResource: egressservice + subresources: + - responseKind: + group: k8s.ovn.org + kind: EgressService + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: machine.openshift.io + versions: + - freshness: Current + resources: + - resource: controlplanemachinesets + responseKind: + group: machine.openshift.io + kind: ControlPlaneMachineSet + version: v1 + scope: Namespaced + singularResource: controlplanemachineset + subresources: + - responseKind: + group: machine.openshift.io + kind: ControlPlaneMachineSet + version: v1 + subresource: status + verbs: + - get + - 
patch + - update + - responseKind: + group: autoscaling + kind: Scale + version: v1 + subresource: scale + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1 + - freshness: Current + resources: + - resource: machinehealthchecks + responseKind: + group: machine.openshift.io + kind: MachineHealthCheck + version: v1beta1 + scope: Namespaced + shortNames: + - mhc + - mhcs + singularResource: machinehealthcheck + subresources: + - responseKind: + group: machine.openshift.io + kind: MachineHealthCheck + version: v1beta1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: machines + responseKind: + group: machine.openshift.io + kind: Machine + version: v1beta1 + scope: Namespaced + singularResource: machine + subresources: + - responseKind: + group: machine.openshift.io + kind: Machine + version: v1beta1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: machinesets + responseKind: + group: machine.openshift.io + kind: MachineSet + version: v1beta1 + scope: Namespaced + singularResource: machineset + subresources: + - responseKind: + group: machine.openshift.io + kind: MachineSet + version: v1beta1 + subresource: status + verbs: + - get + - patch + - update + - responseKind: + group: autoscaling + kind: Scale + version: v1 + subresource: scale + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1beta1 +- metadata: + creationTimestamp: null + name: machineconfiguration.openshift.io + versions: + - freshness: Current + resources: + - resource: containerruntimeconfigs + responseKind: + group: machineconfiguration.openshift.io + kind: 
ContainerRuntimeConfig + version: v1 + scope: Cluster + shortNames: + - ctrcfg + singularResource: containerruntimeconfig + subresources: + - responseKind: + group: machineconfiguration.openshift.io + kind: ContainerRuntimeConfig + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: controllerconfigs + responseKind: + group: machineconfiguration.openshift.io + kind: ControllerConfig + version: v1 + scope: Cluster + singularResource: controllerconfig + subresources: + - responseKind: + group: machineconfiguration.openshift.io + kind: ControllerConfig + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: kubeletconfigs + responseKind: + group: machineconfiguration.openshift.io + kind: KubeletConfig + version: v1 + scope: Cluster + singularResource: kubeletconfig + subresources: + - responseKind: + group: machineconfiguration.openshift.io + kind: KubeletConfig + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: machineconfigpools + responseKind: + group: machineconfiguration.openshift.io + kind: MachineConfigPool + version: v1 + scope: Cluster + shortNames: + - mcp + singularResource: machineconfigpool + subresources: + - responseKind: + group: machineconfiguration.openshift.io + kind: MachineConfigPool + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: machineconfigs + responseKind: + group: machineconfiguration.openshift.io + kind: MachineConfig + version: v1 + scope: Cluster + shortNames: + - mc + singularResource: machineconfig + 
verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: metal3.io + versions: + - freshness: Current + resources: + - resource: baremetalhosts + responseKind: + group: metal3.io + kind: BareMetalHost + version: v1alpha1 + scope: Namespaced + shortNames: + - bmh + - bmhost + singularResource: baremetalhost + subresources: + - responseKind: + group: metal3.io + kind: BareMetalHost + version: v1alpha1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: bmceventsubscriptions + responseKind: + group: metal3.io + kind: BMCEventSubscription + version: v1alpha1 + scope: Namespaced + shortNames: + - bes + - bmcevent + singularResource: bmceventsubscription + subresources: + - responseKind: + group: metal3.io + kind: BMCEventSubscription + version: v1alpha1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: dataimages + responseKind: + group: metal3.io + kind: DataImage + version: v1alpha1 + scope: Namespaced + singularResource: dataimage + subresources: + - responseKind: + group: metal3.io + kind: DataImage + version: v1alpha1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: firmwareschemas + responseKind: + group: metal3.io + kind: FirmwareSchema + version: v1alpha1 + scope: Namespaced + singularResource: firmwareschema + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: hardwaredata + responseKind: + group: metal3.io + kind: HardwareData + version: v1alpha1 + scope: Namespaced + shortNames: + - hd + singularResource: hardwaredata + verbs: + - 
delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: hostfirmwarecomponents + responseKind: + group: metal3.io + kind: HostFirmwareComponents + version: v1alpha1 + scope: Namespaced + singularResource: hostfirmwarecomponents + subresources: + - responseKind: + group: metal3.io + kind: HostFirmwareComponents + version: v1alpha1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: hostfirmwaresettings + responseKind: + group: metal3.io + kind: HostFirmwareSettings + version: v1alpha1 + scope: Namespaced + shortNames: + - hfs + singularResource: hostfirmwaresettings + subresources: + - responseKind: + group: metal3.io + kind: HostFirmwareSettings + version: v1alpha1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: hostupdatepolicies + responseKind: + group: metal3.io + kind: HostUpdatePolicy + version: v1alpha1 + scope: Namespaced + singularResource: hostupdatepolicy + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: preprovisioningimages + responseKind: + group: metal3.io + kind: PreprovisioningImage + version: v1alpha1 + scope: Namespaced + shortNames: + - ppimg + singularResource: preprovisioningimage + subresources: + - responseKind: + group: metal3.io + kind: PreprovisioningImage + version: v1alpha1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: provisionings + responseKind: + group: metal3.io + kind: Provisioning + version: v1alpha1 + scope: Cluster + singularResource: provisioning + subresources: + - responseKind: + group: metal3.io + kind: Provisioning + version: v1alpha1 + 
subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1alpha1 +- metadata: + creationTimestamp: null + name: migration.k8s.io + versions: + - freshness: Current + resources: + - resource: storagestates + responseKind: + group: migration.k8s.io + kind: StorageState + version: v1alpha1 + scope: Cluster + singularResource: storagestate + subresources: + - responseKind: + group: migration.k8s.io + kind: StorageState + version: v1alpha1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: storageversionmigrations + responseKind: + group: migration.k8s.io + kind: StorageVersionMigration + version: v1alpha1 + scope: Cluster + singularResource: storageversionmigration + subresources: + - responseKind: + group: migration.k8s.io + kind: StorageVersionMigration + version: v1alpha1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1alpha1 +- metadata: + creationTimestamp: null + name: monitoring.coreos.com + versions: + - freshness: Current + resources: + - categories: + - prometheus-operator + resource: alertmanagers + responseKind: + group: monitoring.coreos.com + kind: Alertmanager + version: v1 + scope: Namespaced + shortNames: + - am + singularResource: alertmanager + subresources: + - responseKind: + group: monitoring.coreos.com + kind: Alertmanager + version: v1 + subresource: status + verbs: + - get + - patch + - update + - responseKind: + group: autoscaling + kind: Scale + version: v1 + subresource: scale + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - categories: + - prometheus-operator + resource: podmonitors + 
responseKind: + group: monitoring.coreos.com + kind: PodMonitor + version: v1 + scope: Namespaced + shortNames: + - pmon + singularResource: podmonitor + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - categories: + - prometheus-operator + resource: probes + responseKind: + group: monitoring.coreos.com + kind: Probe + version: v1 + scope: Namespaced + shortNames: + - prb + singularResource: probe + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - categories: + - prometheus-operator + resource: prometheuses + responseKind: + group: monitoring.coreos.com + kind: Prometheus + version: v1 + scope: Namespaced + shortNames: + - prom + singularResource: prometheus + subresources: + - responseKind: + group: monitoring.coreos.com + kind: Prometheus + version: v1 + subresource: status + verbs: + - get + - patch + - update + - responseKind: + group: autoscaling + kind: Scale + version: v1 + subresource: scale + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - categories: + - prometheus-operator + resource: prometheusrules + responseKind: + group: monitoring.coreos.com + kind: PrometheusRule + version: v1 + scope: Namespaced + shortNames: + - promrule + singularResource: prometheusrule + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - categories: + - prometheus-operator + resource: servicemonitors + responseKind: + group: monitoring.coreos.com + kind: ServiceMonitor + version: v1 + scope: Namespaced + shortNames: + - smon + singularResource: servicemonitor + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - categories: + - prometheus-operator + resource: thanosrulers + responseKind: + group: monitoring.coreos.com + kind: ThanosRuler + version: v1 + scope: Namespaced + 
shortNames: + - ruler + singularResource: thanosruler + subresources: + - responseKind: + group: monitoring.coreos.com + kind: ThanosRuler + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1 + - freshness: Current + resources: + - categories: + - prometheus-operator + resource: alertmanagerconfigs + responseKind: + group: monitoring.coreos.com + kind: AlertmanagerConfig + version: v1beta1 + scope: Namespaced + shortNames: + - amcfg + singularResource: alertmanagerconfig + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1beta1 + - freshness: Current + resources: + - categories: + - prometheus-operator + resource: alertmanagerconfigs + responseKind: + group: monitoring.coreos.com + kind: AlertmanagerConfig + version: v1alpha1 + scope: Namespaced + shortNames: + - amcfg + singularResource: alertmanagerconfig + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1alpha1 +- metadata: + creationTimestamp: null + name: monitoring.openshift.io + versions: + - freshness: Current + resources: + - resource: alertingrules + responseKind: + group: monitoring.openshift.io + kind: AlertingRule + version: v1 + scope: Namespaced + singularResource: alertingrule + subresources: + - responseKind: + group: monitoring.openshift.io + kind: AlertingRule + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: alertrelabelconfigs + responseKind: + group: monitoring.openshift.io + kind: AlertRelabelConfig + version: v1 + scope: Namespaced + singularResource: alertrelabelconfig + subresources: + - responseKind: + group: monitoring.openshift.io + kind: AlertRelabelConfig + version: v1 + subresource: status + 
verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: network.operator.openshift.io + versions: + - freshness: Current + resources: + - resource: egressrouters + responseKind: + group: network.operator.openshift.io + kind: EgressRouter + version: v1 + scope: Namespaced + singularResource: egressrouter + subresources: + - responseKind: + group: network.operator.openshift.io + kind: EgressRouter + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: operatorpkis + responseKind: + group: network.operator.openshift.io + kind: OperatorPKI + version: v1 + scope: Namespaced + singularResource: operatorpki + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: operator.openshift.io + versions: + - freshness: Current + resources: + - resource: authentications + responseKind: + group: operator.openshift.io + kind: Authentication + version: v1 + scope: Cluster + singularResource: authentication + subresources: + - responseKind: + group: operator.openshift.io + kind: Authentication + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: cloudcredentials + responseKind: + group: operator.openshift.io + kind: CloudCredential + version: v1 + scope: Cluster + singularResource: cloudcredential + subresources: + - responseKind: + group: operator.openshift.io + kind: CloudCredential + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: 
clustercsidrivers + responseKind: + group: operator.openshift.io + kind: ClusterCSIDriver + version: v1 + scope: Cluster + singularResource: clustercsidriver + subresources: + - responseKind: + group: operator.openshift.io + kind: ClusterCSIDriver + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - categories: + - coreoperators + resource: configs + responseKind: + group: operator.openshift.io + kind: Config + version: v1 + scope: Cluster + singularResource: config + subresources: + - responseKind: + group: operator.openshift.io + kind: Config + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: consoles + responseKind: + group: operator.openshift.io + kind: Console + version: v1 + scope: Cluster + singularResource: console + subresources: + - responseKind: + group: operator.openshift.io + kind: Console + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: csisnapshotcontrollers + responseKind: + group: operator.openshift.io + kind: CSISnapshotController + version: v1 + scope: Cluster + singularResource: csisnapshotcontroller + subresources: + - responseKind: + group: operator.openshift.io + kind: CSISnapshotController + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: dnses + responseKind: + group: operator.openshift.io + kind: DNS + version: v1 + scope: Cluster + singularResource: dns + subresources: + - responseKind: + group: operator.openshift.io + kind: DNS + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: 
+ - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - categories: + - coreoperators + resource: etcds + responseKind: + group: operator.openshift.io + kind: Etcd + version: v1 + scope: Cluster + singularResource: etcd + subresources: + - responseKind: + group: operator.openshift.io + kind: Etcd + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: ingresscontrollers + responseKind: + group: operator.openshift.io + kind: IngressController + version: v1 + scope: Namespaced + singularResource: ingresscontroller + subresources: + - responseKind: + group: operator.openshift.io + kind: IngressController + version: v1 + subresource: status + verbs: + - get + - patch + - update + - responseKind: + group: autoscaling + kind: Scale + version: v1 + subresource: scale + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: insightsoperators + responseKind: + group: operator.openshift.io + kind: InsightsOperator + version: v1 + scope: Cluster + singularResource: insightsoperator + subresources: + - responseKind: + group: operator.openshift.io + kind: InsightsOperator + version: v1 + subresource: status + verbs: + - get + - patch + - update + - responseKind: + group: autoscaling + kind: Scale + version: v1 + subresource: scale + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - categories: + - coreoperators + resource: kubeapiservers + responseKind: + group: operator.openshift.io + kind: KubeAPIServer + version: v1 + scope: Cluster + singularResource: kubeapiserver + subresources: + - responseKind: + group: operator.openshift.io + kind: KubeAPIServer + version: v1 + subresource: status + verbs: + - get + - patch + - update + 
verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - categories: + - coreoperators + resource: kubecontrollermanagers + responseKind: + group: operator.openshift.io + kind: KubeControllerManager + version: v1 + scope: Cluster + singularResource: kubecontrollermanager + subresources: + - responseKind: + group: operator.openshift.io + kind: KubeControllerManager + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - categories: + - coreoperators + resource: kubeschedulers + responseKind: + group: operator.openshift.io + kind: KubeScheduler + version: v1 + scope: Cluster + singularResource: kubescheduler + subresources: + - responseKind: + group: operator.openshift.io + kind: KubeScheduler + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: kubestorageversionmigrators + responseKind: + group: operator.openshift.io + kind: KubeStorageVersionMigrator + version: v1 + scope: Cluster + singularResource: kubestorageversionmigrator + subresources: + - responseKind: + group: operator.openshift.io + kind: KubeStorageVersionMigrator + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: machineconfigurations + responseKind: + group: operator.openshift.io + kind: MachineConfiguration + version: v1 + scope: Cluster + singularResource: machineconfiguration + subresources: + - responseKind: + group: operator.openshift.io + kind: MachineConfiguration + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: networks + 
responseKind: + group: operator.openshift.io + kind: Network + version: v1 + scope: Cluster + singularResource: network + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - categories: + - coreoperators + resource: openshiftapiservers + responseKind: + group: operator.openshift.io + kind: OpenShiftAPIServer + version: v1 + scope: Cluster + singularResource: openshiftapiserver + subresources: + - responseKind: + group: operator.openshift.io + kind: OpenShiftAPIServer + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - categories: + - coreoperators + resource: openshiftcontrollermanagers + responseKind: + group: operator.openshift.io + kind: OpenShiftControllerManager + version: v1 + scope: Cluster + singularResource: openshiftcontrollermanager + subresources: + - responseKind: + group: operator.openshift.io + kind: OpenShiftControllerManager + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: servicecas + responseKind: + group: operator.openshift.io + kind: ServiceCA + version: v1 + scope: Cluster + singularResource: serviceca + subresources: + - responseKind: + group: operator.openshift.io + kind: ServiceCA + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: storages + responseKind: + group: operator.openshift.io + kind: Storage + version: v1 + scope: Cluster + singularResource: storage + subresources: + - responseKind: + group: operator.openshift.io + kind: Storage + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - 
update + - watch + version: v1 + - freshness: Current + resources: + - resource: imagecontentsourcepolicies + responseKind: + group: operator.openshift.io + kind: ImageContentSourcePolicy + version: v1alpha1 + scope: Cluster + singularResource: imagecontentsourcepolicy + subresources: + - responseKind: + group: operator.openshift.io + kind: ImageContentSourcePolicy + version: v1alpha1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1alpha1 +- metadata: + creationTimestamp: null + name: operators.coreos.com + versions: + - freshness: Current + resources: + - categories: + - olm + resource: operatorconditions + responseKind: + group: operators.coreos.com + kind: OperatorCondition + version: v2 + scope: Namespaced + shortNames: + - condition + singularResource: operatorcondition + subresources: + - responseKind: + group: operators.coreos.com + kind: OperatorCondition + version: v2 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v2 + - freshness: Current + resources: + - categories: + - olm + resource: olmconfigs + responseKind: + group: operators.coreos.com + kind: OLMConfig + version: v1 + scope: Cluster + singularResource: olmconfig + subresources: + - responseKind: + group: operators.coreos.com + kind: OLMConfig + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - categories: + - olm + resource: operatorconditions + responseKind: + group: operators.coreos.com + kind: OperatorCondition + version: v1 + scope: Namespaced + shortNames: + - condition + singularResource: operatorcondition + subresources: + - responseKind: + group: operators.coreos.com + kind: OperatorCondition + version: v1 + subresource: 
status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - categories: + - olm + resource: operatorgroups + responseKind: + group: operators.coreos.com + kind: OperatorGroup + version: v1 + scope: Namespaced + shortNames: + - og + singularResource: operatorgroup + subresources: + - responseKind: + group: operators.coreos.com + kind: OperatorGroup + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - categories: + - olm + resource: operators + responseKind: + group: operators.coreos.com + kind: Operator + version: v1 + scope: Cluster + singularResource: operator + subresources: + - responseKind: + group: operators.coreos.com + kind: Operator + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1 + - freshness: Current + resources: + - categories: + - olm + resource: operatorgroups + responseKind: + group: operators.coreos.com + kind: OperatorGroup + version: v1alpha2 + scope: Namespaced + shortNames: + - og + singularResource: operatorgroup + subresources: + - responseKind: + group: operators.coreos.com + kind: OperatorGroup + version: v1alpha2 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1alpha2 + - freshness: Current + resources: + - categories: + - olm + resource: catalogsources + responseKind: + group: operators.coreos.com + kind: CatalogSource + version: v1alpha1 + scope: Namespaced + shortNames: + - catsrc + singularResource: catalogsource + subresources: + - responseKind: + group: operators.coreos.com + kind: CatalogSource + version: v1alpha1 + subresource: status + verbs: + - get + - patch + - 
update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - categories: + - olm + resource: clusterserviceversions + responseKind: + group: operators.coreos.com + kind: ClusterServiceVersion + version: v1alpha1 + scope: Namespaced + shortNames: + - csv + - csvs + singularResource: clusterserviceversion + subresources: + - responseKind: + group: operators.coreos.com + kind: ClusterServiceVersion + version: v1alpha1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - categories: + - olm + resource: installplans + responseKind: + group: operators.coreos.com + kind: InstallPlan + version: v1alpha1 + scope: Namespaced + shortNames: + - ip + singularResource: installplan + subresources: + - responseKind: + group: operators.coreos.com + kind: InstallPlan + version: v1alpha1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - categories: + - olm + resource: subscriptions + responseKind: + group: operators.coreos.com + kind: Subscription + version: v1alpha1 + scope: Namespaced + shortNames: + - sub + - subs + singularResource: subscription + subresources: + - responseKind: + group: operators.coreos.com + kind: Subscription + version: v1alpha1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1alpha1 +- metadata: + creationTimestamp: null + name: performance.openshift.io + versions: + - freshness: Current + resources: + - resource: performanceprofiles + responseKind: + group: performance.openshift.io + kind: PerformanceProfile + version: v2 + scope: Cluster + singularResource: performanceprofile + subresources: + - responseKind: + group: performance.openshift.io + kind: 
PerformanceProfile + version: v2 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v2 + - freshness: Current + resources: + - resource: performanceprofiles + responseKind: + group: performance.openshift.io + kind: PerformanceProfile + version: v1 + scope: Cluster + singularResource: performanceprofile + subresources: + - responseKind: + group: performance.openshift.io + kind: PerformanceProfile + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1 + - freshness: Current + resources: + - resource: performanceprofiles + responseKind: + group: performance.openshift.io + kind: PerformanceProfile + version: v1alpha1 + scope: Cluster + singularResource: performanceprofile + subresources: + - responseKind: + group: performance.openshift.io + kind: PerformanceProfile + version: v1alpha1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1alpha1 +- metadata: + creationTimestamp: null + name: policy.networking.k8s.io + versions: + - freshness: Current + resources: + - resource: adminnetworkpolicies + responseKind: + group: policy.networking.k8s.io + kind: AdminNetworkPolicy + version: v1alpha1 + scope: Cluster + shortNames: + - anp + singularResource: adminnetworkpolicy + subresources: + - responseKind: + group: policy.networking.k8s.io + kind: AdminNetworkPolicy + version: v1alpha1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: baselineadminnetworkpolicies + responseKind: + group: policy.networking.k8s.io + kind: BaselineAdminNetworkPolicy + version: v1alpha1 + scope: Cluster + 
shortNames: + - banp + singularResource: baselineadminnetworkpolicy + subresources: + - responseKind: + group: policy.networking.k8s.io + kind: BaselineAdminNetworkPolicy + version: v1alpha1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1alpha1 +- metadata: + creationTimestamp: null + name: samples.operator.openshift.io + versions: + - freshness: Current + resources: + - resource: configs + responseKind: + group: samples.operator.openshift.io + kind: Config + version: v1 + scope: Cluster + singularResource: config + subresources: + - responseKind: + group: samples.operator.openshift.io + kind: Config + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: security.internal.openshift.io + versions: + - freshness: Current + resources: + - resource: rangeallocations + responseKind: + group: security.internal.openshift.io + kind: RangeAllocation + version: v1 + scope: Cluster + singularResource: rangeallocation + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: snapshot.storage.k8s.io + versions: + - freshness: Current + resources: + - resource: volumesnapshotclasses + responseKind: + group: snapshot.storage.k8s.io + kind: VolumeSnapshotClass + version: v1 + scope: Cluster + shortNames: + - vsclass + - vsclasses + singularResource: volumesnapshotclass + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: volumesnapshotcontents + responseKind: + group: snapshot.storage.k8s.io + kind: VolumeSnapshotContent + version: v1 + scope: Cluster + shortNames: + - vsc + - vscs + singularResource: 
volumesnapshotcontent + subresources: + - responseKind: + group: snapshot.storage.k8s.io + kind: VolumeSnapshotContent + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: volumesnapshots + responseKind: + group: snapshot.storage.k8s.io + kind: VolumeSnapshot + version: v1 + scope: Namespaced + shortNames: + - vs + singularResource: volumesnapshot + subresources: + - responseKind: + group: snapshot.storage.k8s.io + kind: VolumeSnapshot + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: tuned.openshift.io + versions: + - freshness: Current + resources: + - resource: profiles + responseKind: + group: tuned.openshift.io + kind: Profile + version: v1 + scope: Namespaced + singularResource: profile + subresources: + - responseKind: + group: tuned.openshift.io + kind: Profile + version: v1 + subresource: status + verbs: + - get + - patch + - update + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: tuneds + responseKind: + group: tuned.openshift.io + kind: Tuned + version: v1 + scope: Namespaced + singularResource: tuned + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1 +- metadata: + creationTimestamp: null + name: whereabouts.cni.cncf.io + versions: + - freshness: Current + resources: + - resource: ippools + responseKind: + group: whereabouts.cni.cncf.io + kind: IPPool + version: v1alpha1 + scope: Namespaced + singularResource: ippool + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + - resource: overlappingrangeipreservations + responseKind: + group: whereabouts.cni.cncf.io + 
kind: OverlappingRangeIPReservation + version: v1alpha1 + scope: Namespaced + singularResource: overlappingrangeipreservation + verbs: + - delete + - deletecollection + - get + - list + - patch + - create + - update + - watch + version: v1alpha1 +- metadata: + creationTimestamp: null + name: metrics.k8s.io + versions: + - freshness: Current + resources: + - resource: nodes + responseKind: + group: "" + kind: NodeMetrics + version: "" + scope: Cluster + singularResource: "" + verbs: + - get + - list + - resource: pods + responseKind: + group: "" + kind: PodMetrics + version: "" + scope: Namespaced + singularResource: "" + verbs: + - get + - list + version: v1beta1 +kind: APIGroupDiscoveryList +metadata: {} diff --git a/vendor/github.com/openshift/library-go/pkg/manifestclient/discovery_reader.go b/vendor/github.com/openshift/library-go/pkg/manifestclient/discovery_reader.go new file mode 100644 index 000000000..e5126561c --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/manifestclient/discovery_reader.go @@ -0,0 +1,150 @@ +package manifestclient + +import ( + "embed" + "errors" + "fmt" + "io/fs" + "path/filepath" + "sync" + + apidiscoveryv2 "k8s.io/api/apidiscovery/v2" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/json" + apirequest "k8s.io/apiserver/pkg/endpoints/request" + "sigs.k8s.io/yaml" +) + +type kindData struct { + kind schema.GroupVersionKind + listKind schema.GroupVersionKind + err error +} + +func newDiscoveryReader(content fs.FS) *discoveryReader { + return &discoveryReader{ + sourceFS: content, + kindForResource: make(map[schema.GroupVersionResource]kindData), + } +} + +type discoveryReader struct { + kindForResource map[schema.GroupVersionResource]kindData + + sourceFS fs.FS + lock sync.RWMutex +} + +func (dr *discoveryReader) getKindForResource(gvr schema.GroupVersionResource) (kindData, error) { + dr.lock.RLock() + kindForGVR, ok := dr.kindForResource[gvr] + if ok { + defer dr.lock.RUnlock() + return 
kindForGVR, kindForGVR.err + } + dr.lock.RUnlock() + + dr.lock.Lock() + defer dr.lock.Unlock() + + kindForGVR, ok = dr.kindForResource[gvr] + if ok { + return kindForGVR, kindForGVR.err + } + + discoveryPath := "/apis" + if len(gvr.Group) == 0 { + discoveryPath = "/api" + } + discoveryBytes, err := dr.getGroupResourceDiscovery(&apirequest.RequestInfo{Path: discoveryPath}) + if err != nil { + kindForGVR.err = fmt.Errorf("error reading discovery: %w", err) + dr.kindForResource[gvr] = kindForGVR + return kindForGVR, kindForGVR.err + } + + discoveryInfo := &apidiscoveryv2.APIGroupDiscoveryList{} + if err := json.Unmarshal(discoveryBytes, discoveryInfo); err != nil { + kindForGVR.err = fmt.Errorf("error unmarshalling discovery: %w", err) + dr.kindForResource[gvr] = kindForGVR + return kindForGVR, kindForGVR.err + } + + kindForGVR.err = fmt.Errorf("did not find kind for %v\n", gvr) + for _, groupInfo := range discoveryInfo.Items { + if groupInfo.Name != gvr.Group { + continue + } + for _, versionInfo := range groupInfo.Versions { + if versionInfo.Version != gvr.Version { + continue + } + for _, resourceInfo := range versionInfo.Resources { + if resourceInfo.Resource != gvr.Resource { + continue + } + if resourceInfo.ResponseKind == nil { + continue + } + kindForGVR.kind = schema.GroupVersionKind{ + Group: gvr.Group, + Version: gvr.Version, + Kind: resourceInfo.ResponseKind.Kind, + } + if len(resourceInfo.ResponseKind.Group) > 0 { + kindForGVR.kind.Group = resourceInfo.ResponseKind.Group + } + if len(resourceInfo.ResponseKind.Version) > 0 { + kindForGVR.kind.Version = resourceInfo.ResponseKind.Version + } + kindForGVR.listKind = schema.GroupVersionKind{ + Group: kindForGVR.kind.Group, + Version: kindForGVR.kind.Version, + Kind: resourceInfo.ResponseKind.Kind + "List", + } + kindForGVR.err = nil + dr.kindForResource[gvr] = kindForGVR + return kindForGVR, kindForGVR.err + } + } + } + + dr.kindForResource[gvr] = kindForGVR + return kindForGVR, kindForGVR.err +} + +func (dr 
*discoveryReader) getGroupResourceDiscovery(requestInfo *apirequest.RequestInfo) ([]byte, error) { + switch { + case requestInfo.Path == "/api": + return dr.getAggregatedDiscoveryForURL("aggregated-discovery-api.yaml", requestInfo.Path) + case requestInfo.Path == "/apis": + return dr.getAggregatedDiscoveryForURL("aggregated-discovery-apis.yaml", requestInfo.Path) + default: + // TODO can probably do better + return nil, fmt.Errorf("unsupported discovery path: %q", requestInfo.Path) + } +} + +func (dr *discoveryReader) getAggregatedDiscoveryForURL(filename, url string) ([]byte, error) { + discoveryBytes, err := fs.ReadFile(dr.sourceFS, filename) + if errors.Is(err, fs.ErrNotExist) { + discoveryBytes, err = fs.ReadFile(defaultDiscovery, filepath.Join("default-discovery", filename)) + } + if err != nil { + return nil, fmt.Errorf("error reading discovery: %w", err) + } + + apiMap := map[string]interface{}{} + if err := yaml.Unmarshal(discoveryBytes, &apiMap); err != nil { + return nil, fmt.Errorf("discovery %q unmarshal failed: %w", url, err) + } + apiJSON, err := json.Marshal(apiMap) + if err != nil { + return nil, fmt.Errorf("discovery %q marshal failed: %w", url, err) + } + + return apiJSON, err +} + +//go:embed default-discovery +var defaultDiscovery embed.FS diff --git a/vendor/github.com/openshift/library-go/pkg/manifestclient/encoding.go b/vendor/github.com/openshift/library-go/pkg/manifestclient/encoding.go new file mode 100644 index 000000000..811ee9b55 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/manifestclient/encoding.go @@ -0,0 +1,90 @@ +package manifestclient + +import ( + "encoding/json" + "fmt" + "io/fs" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" +) + +func individualFromList(objList *unstructured.UnstructuredList, name string) (*unstructured.Unstructured, error) { + individualKind 
:= strings.TrimSuffix(objList.GetKind(), "List") + + for _, obj := range objList.Items { + if obj.GetName() != name { + continue + } + + ret := obj.DeepCopy() + ret.SetKind(individualKind) + return ret, nil + } + + return nil, fmt.Errorf("not found in this list") +} + +func readListFile(sourceFS fs.FS, path string) (*unstructured.UnstructuredList, error) { + content, err := fs.ReadFile(sourceFS, path) + if err != nil { + return nil, fmt.Errorf("unable to read %q: %w", path, err) + } + + return decodeListObj(content) +} + +func readIndividualFile(sourceFS fs.FS, path string) (*unstructured.Unstructured, error) { + content, err := fs.ReadFile(sourceFS, path) + if err != nil { + return nil, fmt.Errorf("unable to read %q: %w", path, err) + } + + return decodeIndividualObj(content) +} + +var localScheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(localScheme) + +func decodeIndividualObj(content []byte) (*unstructured.Unstructured, error) { + obj, _, err := codecs.UniversalDecoder().Decode(content, nil, &unstructured.Unstructured{}) + if err != nil { + return nil, fmt.Errorf("unable to decode: %w", err) + } + return obj.(*unstructured.Unstructured), nil +} + +func decodeListObj(content []byte) (*unstructured.UnstructuredList, error) { + obj, _, err := codecs.UniversalDecoder().Decode(content, nil, &unstructured.UnstructuredList{}) + if err != nil { + return nil, fmt.Errorf("unable to decode: %w", err) + } + return obj.(*unstructured.UnstructuredList), nil +} + +func serializeIndividualObjToJSON(obj *unstructured.Unstructured) (string, error) { + ret, err := json.MarshalIndent(obj.Object, "", " ") + if err != nil { + return "", err + } + return string(ret) + "\n", nil +} + +func serializeListObjToJSON(obj *unstructured.UnstructuredList) (string, error) { + ret, err := json.MarshalIndent(obj, "", " ") + if err != nil { + return "", err + } + return string(ret) + "\n", nil +} + +func serializeAPIResourceListToJSON(obj *metav1.APIResourceList) (string, 
error) { + ret, err := json.MarshalIndent(obj, "", " ") + if err != nil { + return "", err + } + return string(ret) + "\n", nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/manifestclient/get.go b/vendor/github.com/openshift/library-go/pkg/manifestclient/get.go new file mode 100644 index 000000000..f47aa30be --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/manifestclient/get.go @@ -0,0 +1,123 @@ +package manifestclient + +import ( + "errors" + "fmt" + "io/fs" + "path/filepath" + + apirequest "k8s.io/apiserver/pkg/endpoints/request" +) + +// must-gather has a few different ways to store resources +// 1. cluster-scoped-resource/group/resource/.yaml +// 2. cluster-scoped-resource/group/resource.yaml +// 3. namespaces//group/resource/.yaml +// 4. namespaces//group/resource.yaml +// we have to choose which to prefer and we should always prefer the #2 if it's available. +// Keep in mind that to produce a cluster-scoped list of namespaced resources, you can need to navigate many namespaces. 
+func (mrt *manifestRoundTripper) get(requestInfo *apirequest.RequestInfo) ([]byte, error) { + if len(requestInfo.Name) == 0 { + return nil, fmt.Errorf("name required for GET") + } + if len(requestInfo.Resource) == 0 { + return nil, fmt.Errorf("resource required for GET") + } + requiredAPIVersion := fmt.Sprintf("%s/%s", requestInfo.APIGroup, requestInfo.APIVersion) + if len(requestInfo.APIGroup) == 0 { + requiredAPIVersion = fmt.Sprintf("%s", requestInfo.APIVersion) + } + + individualFilePath := individualGetFileLocation(requestInfo) + individualObj, individualErr := readIndividualFile(mrt.sourceFS, individualFilePath) + switch { + case errors.Is(individualErr, fs.ErrNotExist): + // try for the list + case individualErr != nil: + return nil, fmt.Errorf("unable to read file: %w", individualErr) + default: + if individualObj.GetAPIVersion() != requiredAPIVersion { + return nil, fmt.Errorf("actual version %v does not match request %v", individualObj.GetAPIVersion(), requiredAPIVersion) + } + ret, err := serializeIndividualObjToJSON(individualObj) + if err != nil { + return nil, fmt.Errorf("failed to serialize %v: %v", individualFilePath, err) + } + return []byte(ret), nil + } + + listFilePath := listGetFileLocation(requestInfo) + listObj, listErr := readListFile(mrt.sourceFS, listFilePath) + switch { + case errors.Is(listErr, fs.ErrNotExist): + // we need this to be a not-found when sent back + return nil, newNotFound(requestInfo) + + case listErr != nil: + return nil, fmt.Errorf("unable to read file: %w", listErr) + default: + obj, err := individualFromList(listObj, requestInfo.Name) + if obj == nil { + return nil, newNotFound(requestInfo) + } + if obj.GetAPIVersion() != requiredAPIVersion { + return nil, fmt.Errorf("actual version %v does not match request %v", obj.GetAPIVersion(), requiredAPIVersion) + } + + ret, err := serializeIndividualObjToJSON(obj) + if err != nil { + return nil, fmt.Errorf("failed to serialize %v: %v", listFilePath, err) + } + return 
[]byte(ret), nil + } +} + +func individualGetFileLocation(requestInfo *apirequest.RequestInfo) string { + fileParts := []string{} + + if len(requestInfo.APIGroup) == 0 && + requestInfo.APIVersion == "v1" && + requestInfo.Resource == "namespaces" && + len(requestInfo.Subresource) == 0 && + requestInfo.Namespace == requestInfo.Name { // namespaces are weird. They list their own namespace in requestInfo.namespace + + fileParts = append(fileParts, "namespaces", requestInfo.Name, requestInfo.Name+".yaml") + return filepath.Join(fileParts...) + } + + if len(requestInfo.Namespace) > 0 { + fileParts = append(fileParts, "namespaces", requestInfo.Namespace) + } else { + fileParts = append(fileParts, "cluster-scoped-resources") + } + + if len(requestInfo.APIGroup) > 0 { + fileParts = append(fileParts, requestInfo.APIGroup) + } else { + fileParts = append(fileParts, "core") + } + + fileParts = append(fileParts, requestInfo.Resource, fmt.Sprintf("%s.yaml", requestInfo.Name)) + + return filepath.Join(fileParts...) +} + +func listGetFileLocation(requestInfo *apirequest.RequestInfo) string { + fileParts := []string{} + + if len(requestInfo.Namespace) > 0 { + fileParts = append(fileParts, "namespaces", requestInfo.Namespace) + } else { + fileParts = append(fileParts, "cluster-scoped-resources") + } + + if len(requestInfo.APIGroup) > 0 { + fileParts = append(fileParts, requestInfo.APIGroup) + } else { + fileParts = append(fileParts, "core") + } + + fileParts = append(fileParts, fmt.Sprintf("%s.yaml", requestInfo.Resource)) + + return filepath.Join(fileParts...) 
+} diff --git a/vendor/github.com/openshift/library-go/pkg/manifestclient/io.go b/vendor/github.com/openshift/library-go/pkg/manifestclient/io.go new file mode 100644 index 000000000..8ec17d04e --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/manifestclient/io.go @@ -0,0 +1,37 @@ +package manifestclient + +import ( + "io" + "sync/atomic" + "time" +) + +func newDelayedNothingReader(timeout time.Duration) *delayedNothingReaderCloser { + return &delayedNothingReaderCloser{timeout: timeout} +} + +type delayedNothingReaderCloser struct { + timeout time.Duration + closed atomic.Bool +} + +func (d *delayedNothingReaderCloser) Read(p []byte) (n int, err error) { + if d.closed.Load() { + return 0, io.EOF + } + select { + case <-time.After(d.timeout): + d.Close() + } + if d.closed.Load() { + return 0, io.EOF + } + return 0, nil +} + +func (d *delayedNothingReaderCloser) Close() error { + d.closed.Store(true) + return nil +} + +var _ io.ReadCloser = &delayedNothingReaderCloser{} diff --git a/vendor/github.com/openshift/library-go/pkg/manifestclient/list.go b/vendor/github.com/openshift/library-go/pkg/manifestclient/list.go new file mode 100644 index 000000000..6ee3da6e5 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/manifestclient/list.go @@ -0,0 +1,307 @@ +package manifestclient + +import ( + "errors" + "fmt" + "io/fs" + "path/filepath" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + apirequest "k8s.io/apiserver/pkg/endpoints/request" +) + +// must-gather has a few different ways to store resources +// 1. cluster-scoped-resource/group/resource/.yaml +// 2. cluster-scoped-resource/group/resource.yaml +// 3. namespaces//group/resource/.yaml +// 4. namespaces//group/resource.yaml +// we have to choose which to prefer and we should always prefer the #2 if it's available. 
+// Keep in mind that to produce a cluster-scoped list of namespaced resources, you can need to navigate many namespaces. +func (mrt *manifestRoundTripper) list(requestInfo *apirequest.RequestInfo) ([]byte, error) { + // TODO post-filter for label selectors + return mrt.listAll(requestInfo) +} + +func (mrt *manifestRoundTripper) listAll(requestInfo *apirequest.RequestInfo) ([]byte, error) { + var retList *unstructured.UnstructuredList + + // namespaces are special. + if len(requestInfo.APIGroup) == 0 && + requestInfo.APIVersion == "v1" && + requestInfo.Resource == "namespaces" && + len(requestInfo.Subresource) == 0 { + + return mrt.listAllNamespaces() + } + + gvr := schema.GroupVersionResource{ + Group: requestInfo.APIGroup, + Version: requestInfo.APIVersion, + Resource: requestInfo.Resource, + } + + kind, err := mrt.discoveryReader.getKindForResource(gvr) + if err != nil { + return nil, fmt.Errorf("unable to determine list kind: %w", err) + } + possibleListFiles, err := allPossibleListFileLocations(mrt.sourceFS, requestInfo) + switch { + case errors.Is(err, fs.ErrNotExist): + // continue to see if something else is present to return + case err != nil: + return nil, fmt.Errorf("unable to determine list file locations: %w", err) + } + for _, listFile := range possibleListFiles { + currList, err := readListFile(mrt.sourceFS, listFile) + switch { + case errors.Is(err, fs.ErrNotExist): + // do nothing, it's possible, not guaranteed + continue + case err != nil: + return nil, fmt.Errorf("unable to determine read list file %v: %w", listFile, err) + } + + if retList == nil { + retList = currList + continue + } + for i := range currList.Items { + retList.Items = append(retList.Items, currList.Items[i]) + } + } + if retList != nil { + if retList.GroupVersionKind() != kind.listKind { + return nil, fmt.Errorf("inconsistent list kind: got %v, expected %v", retList.GroupVersionKind(), kind.listKind) + } + retList, err := filterByLabelSelector(retList, requestInfo.LabelSelector) 
+ if err != nil { + return nil, fmt.Errorf("failed to filter by labelSelector %s: %w", requestInfo.LabelSelector, err) + } + ret, err := serializeListObjToJSON(retList) + if err != nil { + return nil, fmt.Errorf("failed to serialize: %v", err) + } + return []byte(ret), nil + } + + retList = &unstructured.UnstructuredList{ + Object: map[string]interface{}{}, + Items: nil, + } + retList.SetGroupVersionKind(kind.listKind) + individualFiles, err := allIndividualFileLocations(mrt.sourceFS, requestInfo) + switch { + case errors.Is(err, fs.ErrNotExist): + // continue to see if something else is present to return + case err != nil: + return nil, fmt.Errorf("unable to determine individual file locations: %w", err) + } + for _, individualFile := range individualFiles { + currInstance, err := readIndividualFile(mrt.sourceFS, individualFile) + switch { + case errors.Is(err, fs.ErrNotExist): + // do nothing, it's possible, not guaranteed + continue + case err != nil: + return nil, fmt.Errorf("unable to determine read list file %v: %w", individualFile, err) + } + + retList.Items = append(retList.Items, *currInstance) + } + if len(retList.Items) > 0 { + if retList.Items[0].GroupVersionKind() != kind.kind { + return nil, fmt.Errorf("inconsistent item kind: got %v, expected %v", retList.Items[0].GroupVersionKind(), kind.kind) + } + retList, err := filterByLabelSelector(retList, requestInfo.LabelSelector) + if err != nil { + return nil, fmt.Errorf("failed to filter by labelSelector %s: %w", requestInfo.LabelSelector, err) + } + ret, err := serializeListObjToJSON(retList) + if err != nil { + return nil, fmt.Errorf("failed to serialize: %v", err) + } + return []byte(ret), nil + } + + // if we get here, there is no list file and no individual files. + // the namespace must exist or we would have returned long ago. Return an empty list. 
+ ret, err := serializeListObjToJSON(retList) + if err != nil { + return nil, fmt.Errorf("failed to serialize: %v", err) + } + + return []byte(ret), nil +} + +func (mrt *manifestRoundTripper) listAllNamespaces() ([]byte, error) { + possibleNamespaceFiles, err := allPossibleNamespaceFiles(mrt.sourceFS) + switch { + case errors.Is(err, fs.ErrNotExist): + case err != nil: + return nil, fmt.Errorf("unable to determine list file alternative individual files: %w", err) + } + + namespaces := []unstructured.Unstructured{} + for _, individualFile := range possibleNamespaceFiles { + currNamespace, err := readIndividualFile(mrt.sourceFS, individualFile) + switch { + case errors.Is(err, fs.ErrNotExist): + // do nothing, it's possible, not guaranteed + continue + case err != nil: + return nil, fmt.Errorf("unable to determine read namespace individual file %v: %w", individualFile, err) + } + namespaces = append(namespaces, *currNamespace) + + } + + retList := &unstructured.UnstructuredList{ + Object: map[string]interface{}{}, + Items: namespaces, + } + retList.SetKind("NamespaceList") + retList.SetAPIVersion("v1") + + ret, err := serializeListObjToJSON(retList) + if err != nil { + return nil, fmt.Errorf("failed to serialize: %v", err) + } + + return []byte(ret), nil +} + +func allIndividualFileLocations(sourceFS fs.FS, requestInfo *apirequest.RequestInfo) ([]string, error) { + resourceDirectoryParts := []string{} + if len(requestInfo.APIGroup) > 0 { + resourceDirectoryParts = append(resourceDirectoryParts, requestInfo.APIGroup) + } else { + resourceDirectoryParts = append(resourceDirectoryParts, "core") + } + resourceDirectoryParts = append(resourceDirectoryParts, requestInfo.Resource) + + resourceDirectoriesToCheckForIndividualFiles := []string{} + if len(requestInfo.Namespace) > 0 { + parts := append([]string{"namespaces", requestInfo.Namespace}, resourceDirectoryParts...) 
+ resourceDirectoriesToCheckForIndividualFiles = append(resourceDirectoriesToCheckForIndividualFiles, filepath.Join(parts...)) + + } else { + clusterParts := append([]string{"cluster-scoped-resources"}, resourceDirectoryParts...) + resourceDirectoriesToCheckForIndividualFiles = append(resourceDirectoriesToCheckForIndividualFiles, filepath.Join(clusterParts...)) + + namespaces, err := allNamespacesWithData(sourceFS) + switch { + case errors.Is(err, fs.ErrNotExist): + // do nothing and continue + case err != nil: + return nil, fmt.Errorf("unable to read namespaces: %w", err) + } + for _, ns := range namespaces { + nsParts := append([]string{"namespaces", ns}, resourceDirectoryParts...) + resourceDirectoriesToCheckForIndividualFiles = append(resourceDirectoriesToCheckForIndividualFiles, filepath.Join(nsParts...)) + } + } + + allIndividualFilePaths := []string{} + for _, resourceDirectory := range resourceDirectoriesToCheckForIndividualFiles { + individualFiles, err := fs.ReadDir(sourceFS, resourceDirectory) + switch { + case errors.Is(err, fs.ErrNotExist): + continue + case err != nil: + return nil, fmt.Errorf("unable to read resourceDir") + } + + for _, curr := range individualFiles { + allIndividualFilePaths = append(allIndividualFilePaths, filepath.Join(resourceDirectory, curr.Name())) + } + } + + return allIndividualFilePaths, nil +} + +func allPossibleListFileLocations(sourceFS fs.FS, requestInfo *apirequest.RequestInfo) ([]string, error) { + resourceListFileParts := []string{} + if len(requestInfo.APIGroup) > 0 { + resourceListFileParts = append(resourceListFileParts, requestInfo.APIGroup) + } else { + resourceListFileParts = append(resourceListFileParts, "core") + } + resourceListFileParts = append(resourceListFileParts, fmt.Sprintf("%s.yaml", requestInfo.Resource)) + + allPossibleListFileLocations := []string{} + if len(requestInfo.Namespace) > 0 { + parts := append([]string{"namespaces", requestInfo.Namespace}, resourceListFileParts...) 
+ allPossibleListFileLocations = append(allPossibleListFileLocations, filepath.Join(parts...)) + + } else { + clusterParts := append([]string{"cluster-scoped-resources"}, resourceListFileParts...) + allPossibleListFileLocations = append(allPossibleListFileLocations, filepath.Join(clusterParts...)) + + namespaces, err := allNamespacesWithData(sourceFS) + switch { + case errors.Is(err, fs.ErrNotExist): + return allPossibleListFileLocations, nil + case err != nil: + return nil, fmt.Errorf("unable to read namespaces: %w", err) + } + for _, ns := range namespaces { + nsParts := append([]string{"namespaces", ns}, resourceListFileParts...) + allPossibleListFileLocations = append(allPossibleListFileLocations, filepath.Join(nsParts...)) + } + } + + return allPossibleListFileLocations, nil +} + +func allNamespacesWithData(sourceFS fs.FS) ([]string, error) { + nsDirs, err := fs.ReadDir(sourceFS, "namespaces") + if err != nil { + return nil, fmt.Errorf("failed to read allNamespacesWithData: %w", err) + } + + ret := []string{} + for _, curr := range nsDirs { + ret = append(ret, curr.Name()) + } + + return ret, nil +} + +func allPossibleNamespaceFiles(sourceFS fs.FS) ([]string, error) { + allPossibleListFileLocations := []string{} + namespaces, err := allNamespacesWithData(sourceFS) + if err != nil { + return nil, fmt.Errorf("unable to read namespaces: %w", err) + } + + for _, namespace := range namespaces { + allPossibleListFileLocations = append(allPossibleListFileLocations, filepath.Join("namespaces", namespace, namespace+".yaml")) + } + + return allPossibleListFileLocations, nil +} + +func filterByLabelSelector(list *unstructured.UnstructuredList, labelSelector string) (*unstructured.UnstructuredList, error) { + if labelSelector == "" { + return list, nil + } + + parsedSelector, err := labels.Parse(labelSelector) + if err != nil { + return nil, err + } + + var filteredItems []unstructured.Unstructured + for _, item := range list.Items { + if 
parsedSelector.Matches(labels.Set(item.GetLabels())) { + filteredItems = append(filteredItems, item) + } + } + + return &unstructured.UnstructuredList{ + Object: list.Object, + Items: filteredItems, + }, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/manifestclient/mutation_directory_reader.go b/vendor/github.com/openshift/library-go/pkg/manifestclient/mutation_directory_reader.go new file mode 100644 index 000000000..2f0a8c649 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/manifestclient/mutation_directory_reader.go @@ -0,0 +1,170 @@ +package manifestclient + +import ( + "errors" + "fmt" + "io/fs" + "k8s.io/apimachinery/pkg/runtime/schema" + "os" + "path/filepath" + "regexp" + "sigs.k8s.io/yaml" + "strings" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/util/sets" +) + +func ReadMutationDirectory(mutationDirectory string) (*AllActionsTracker[FileOriginatedSerializedRequest], error) { + return readMutationFS(os.DirFS(mutationDirectory)) +} + +func ReadEmbeddedMutationDirectory(inFS fs.FS) (*AllActionsTracker[FileOriginatedSerializedRequest], error) { + return readMutationFS(inFS) +} + +func readMutationFS(inFS fs.FS) (*AllActionsTracker[FileOriginatedSerializedRequest], error) { + ret := NewAllActionsTracker[FileOriginatedSerializedRequest]() + errs := []error{} + + for _, action := range sets.List(AllActions) { + file, err := inFS.Open(string(action)) + if file != nil { + file.Close() + } + switch { + case errors.Is(err, fs.ErrNotExist): + continue + case err != nil: + errs = append(errs, fmt.Errorf("unable to read %q : %w", action, err)) + continue + case err == nil: + } + actionFS, err := fs.Sub(inFS, string(action)) + if err != nil { + errs = append(errs, fmt.Errorf("unable to create subFS %q: %w", action, err)) + continue + } + + currResourceList, err := readSerializedRequestsFromActionDirectory(action, actionFS) + if err != nil { + errs = append(errs, fmt.Errorf("unable to read %q: %w", 
action, err)) + } + ret.AddRequests(currResourceList...) + } + + if len(errs) > 0 { + return nil, errors.Join(errs...) + } + return ret, nil +} + +func readSerializedRequestsFromActionDirectory(action Action, actionFS fs.FS) ([]FileOriginatedSerializedRequest, error) { + currResourceList := []FileOriginatedSerializedRequest{} + errs := []error{} + err := fs.WalkDir(actionFS, ".", func(currLocation string, currFile fs.DirEntry, err error) error { + if err != nil { + errs = append(errs, err) + } + + if currFile.IsDir() { + return nil + } + if !strings.HasSuffix(currFile.Name(), ".yaml") && !strings.HasSuffix(currFile.Name(), ".json") { + return nil + } + currResource, err := serializedRequestFromFile(action, actionFS, currLocation) + if err != nil { + return fmt.Errorf("error deserializing %q: %w", currLocation, err) + } + if currResource == nil { // not all file are body files, so those can be nil + return nil + } + currResourceList = append(currResourceList, *currResource) + + return nil + }) + if err != nil { + errs = append(errs, err) + } + + if len(errs) > 0 { + return nil, errors.Join(errs...) 
+ } + return currResourceList, nil +} + +var ( + bodyRegex = regexp.MustCompile(`.*-body-(.+).yaml`) + optionsRegex = regexp.MustCompile(`.*-options-(.+).yaml`) +) + +func serializedRequestFromFile(action Action, actionFS fs.FS, bodyFilename string) (*FileOriginatedSerializedRequest, error) { + bodyBasename := filepath.Base(bodyFilename) + if !bodyRegex.MatchString(bodyBasename) { + return nil, nil + } + optionsBaseName := strings.Replace(bodyBasename, "body", "options", 1) + optionsFilename := filepath.Join(filepath.Dir(bodyFilename), optionsBaseName) + metadataBaseName := strings.Replace(bodyBasename, "body", "metadata", 1) + metadataFilename := filepath.Join(filepath.Dir(bodyFilename), metadataBaseName) + + bodyContent, err := fs.ReadFile(actionFS, bodyFilename) + if err != nil { + return nil, fmt.Errorf("failed to read %q: %w", bodyFilename, err) + } + + metadataContent, err := fs.ReadFile(actionFS, metadataFilename) + if err != nil { + return nil, fmt.Errorf("failed to read %q: %w", metadataFilename, err) + } + metadataFromFile := &ActionMetadata{} + if err := yaml.Unmarshal(metadataContent, metadataFromFile); err != nil { + return nil, fmt.Errorf("failed to parse %q: %w", metadataFilename, err) + } + + optionsExist := false + optionsContent, err := fs.ReadFile(actionFS, optionsFilename) + switch { + case errors.Is(err, fs.ErrNotExist): + // not required, do nothing + case err != nil: + return nil, fmt.Errorf("failed to read %q: %w", optionsFilename, err) + case err == nil: + optionsExist = true + } + + // parse to discover bits of the serialized request + kindType := schema.GroupVersionKind{} + actionHasRuntimeObjectBody := action != ActionPatch && action != ActionPatchStatus + if actionHasRuntimeObjectBody { + retObj, _, jsonErr := unstructured.UnstructuredJSONScheme.Decode(bodyContent, nil, &unstructured.Unstructured{}) + if jsonErr != nil { + // try to see if it's yaml + jsonString, err := yaml.YAMLToJSON(bodyContent) + if err != nil { + return nil, 
fmt.Errorf("unable to decode %q as json: %w", bodyFilename, jsonErr) + } + retObj, _, err = unstructured.UnstructuredJSONScheme.Decode(jsonString, nil, &unstructured.Unstructured{}) + if err != nil { + return nil, fmt.Errorf("unable to decode %q as yaml: %w", bodyFilename, err) + } + kindType = retObj.(*unstructured.Unstructured).GroupVersionKind() + } + } + + ret := &FileOriginatedSerializedRequest{ + BodyFilename: bodyFilename, + SerializedRequest: SerializedRequest{ + ActionMetadata: *metadataFromFile, + KindType: kindType, + Body: bodyContent, + }, + } + if optionsExist { + ret.OptionsFilename = optionsFilename + ret.SerializedRequest.Options = optionsContent + } + + return ret, nil +} diff --git a/vendor/github.com/openshift/library-go/pkg/manifestclient/mutation_directory_writer.go b/vendor/github.com/openshift/library-go/pkg/manifestclient/mutation_directory_writer.go new file mode 100644 index 000000000..cac4c3c72 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/manifestclient/mutation_directory_writer.go @@ -0,0 +1,47 @@ +package manifestclient + +import ( + "errors" + "fmt" + "os" + "path/filepath" + "sigs.k8s.io/yaml" +) + +func WriteMutationDirectory[T SerializedRequestish](mutationDirectory string, requests ...T) error { + errs := []error{} + + for _, request := range requests { + metadataFilename, bodyFilename, optionsFilename := request.SuggestedFilenames() + bodyPath := filepath.Join(mutationDirectory, bodyFilename) + metadataPath := filepath.Join(mutationDirectory, metadataFilename) + + metadataBytes, err := yaml.Marshal(request.GetSerializedRequest().GetLookupMetadata()) + if err != nil { + errs = append(errs, fmt.Errorf("unable to serialize metadata %v: %w", request.GetSerializedRequest().ActionMetadata, err)) + continue + } + + parentDir := filepath.Dir(bodyPath) + if err := os.MkdirAll(parentDir, 0755); err != nil { + errs = append(errs, fmt.Errorf("unable to create parentDir %q: %w", parentDir, err)) + continue + } + + if err := 
os.WriteFile(metadataPath, metadataBytes, 0644); err != nil { + errs = append(errs, fmt.Errorf("unable to write body %v: %w", request, err)) + } + if err := os.WriteFile(bodyPath, request.GetSerializedRequest().Body, 0644); err != nil { + errs = append(errs, fmt.Errorf("unable to write body %v: %w", request, err)) + } + if len(request.GetSerializedRequest().Options) > 0 { + optionsPath := filepath.Join(mutationDirectory, optionsFilename) + if err := os.WriteFile(optionsPath, request.GetSerializedRequest().Options, 0644); err != nil { + errs = append(errs, fmt.Errorf("unable to write options %v: %w", request, err)) + } + } + + } + + return errors.Join(errs...) +} diff --git a/vendor/github.com/openshift/library-go/pkg/manifestclient/mutation_tracker.go b/vendor/github.com/openshift/library-go/pkg/manifestclient/mutation_tracker.go new file mode 100644 index 000000000..b0548a758 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/manifestclient/mutation_tracker.go @@ -0,0 +1,164 @@ +package manifestclient + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" +) + +const DeletionNameAnnotation = "operator.openshift.io/deletion-name" +const SyntheticControllerInstanceNameAnnotation = "synthetic.mom.openshift.io/controller-instance-name" + +type Action string + +const ( + // this is really a subset of patch, but we treat it separately because it is useful to do so + ActionPatch Action = "Patch" + ActionPatchStatus Action = "PatchStatus" + ActionApply Action = "Apply" + ActionApplyStatus Action = "ApplyStatus" + ActionUpdate Action = "Update" + ActionUpdateStatus Action = "UpdateStatus" + ActionCreate Action = "Create" + ActionDelete Action = "Delete" +) + +var ( + AllActions = sets.New[Action]( + ActionPatch, + ActionPatchStatus, + ActionApply, + ActionApplyStatus, + ActionUpdate, + ActionUpdateStatus, + ActionCreate, + ActionDelete, + ) +) + +type AllActionsTracker[T SerializedRequestish] struct { + actionToTracker 
map[Action]*actionTracker[T] +} + +type ActionMetadata struct { + Action Action `json:"action"` + ResourceMetadata `json:",inline"` + + PatchType string `json:"patchType,omitempty"` + FieldManager string `json:"fieldManager,omitempty"` + ControllerInstanceName string `json:"controllerInstanceName"` +} + +// ResourceMetadata uniquely identifies an item in the API +// This is probably shareable across multiple packages. +type ResourceMetadata struct { + ResourceType schema.GroupVersionResource `json:"resourceType"` + Namespace string `json:"namespace,omitempty"` + Name string `json:"name"` + GenerateName string `json:"generateName"` +} + +type actionTracker[T SerializedRequestish] struct { + action Action + + requests []T +} + +func NewAllActionsTracker[T SerializedRequestish]() *AllActionsTracker[T] { + return &AllActionsTracker[T]{ + actionToTracker: make(map[Action]*actionTracker[T]), + } +} + +func (a *AllActionsTracker[T]) AddRequests(requests ...T) { + for _, request := range requests { + a.AddRequest(request) + } +} + +func (a *AllActionsTracker[T]) AddRequest(request T) { + if a.actionToTracker == nil { + a.actionToTracker = map[Action]*actionTracker[T]{} + } + action := request.GetSerializedRequest().Action + if _, ok := a.actionToTracker[action]; !ok { + a.actionToTracker[action] = &actionTracker[T]{action: action} + } + a.actionToTracker[action].AddRequest(request) +} + +func (a *AllActionsTracker[T]) ListActions() []Action { + return sets.List(sets.KeySet(a.actionToTracker)) +} + +func (a *AllActionsTracker[T]) RequestsForAction(action Action) []SerializedRequestish { + ret := []SerializedRequestish{} + tracker, ok := a.actionToTracker[action] + if !ok { + return nil + } + mutations := tracker.Mutations() + for _, mutation := range mutations { + ret = append(ret, mutation) + } + return ret +} + +func (a *AllActionsTracker[T]) RequestsForResource(metadata ActionMetadata) []SerializedRequestish { + ret := []SerializedRequestish{} + tracker, ok := 
a.actionToTracker[metadata.Action] + if !ok { + return nil + } + mutations := tracker.Mutations() + for _, mutation := range mutations { + if mutation.GetSerializedRequest().GetLookupMetadata() == metadata { + ret = append(ret, mutation) + } + } + return ret +} + +func (a *AllActionsTracker[T]) AllRequests() []SerializedRequestish { + ret := []SerializedRequestish{} + for _, currActionTracker := range a.actionToTracker { + mutations := currActionTracker.Mutations() + for _, mutation := range mutations { + ret = append(ret, mutation) + } + } + return ret +} + +func (a *AllActionsTracker[T]) DeepCopy() *AllActionsTracker[T] { + ret := &AllActionsTracker[T]{ + actionToTracker: make(map[Action]*actionTracker[T]), + } + + for k, v := range a.actionToTracker { + ret.actionToTracker[k] = v.DeepCopy() + } + return ret +} + +func (a *actionTracker[T]) AddRequest(request T) { + if a.action != request.GetSerializedRequest().Action { + panic("coding error") + } + a.requests = append(a.requests, request) +} + +func (a *actionTracker[T]) Mutations() []T { + return a.requests +} + +func (a *actionTracker[T]) DeepCopy() *actionTracker[T] { + ret := &actionTracker[T]{ + action: a.action, + requests: make([]T, 0), + } + + for _, v := range a.requests { + ret.requests = append(ret.requests, v.DeepCopy().(T)) + } + return ret +} diff --git a/vendor/github.com/openshift/library-go/pkg/manifestclient/read_roundtripper.go b/vendor/github.com/openshift/library-go/pkg/manifestclient/read_roundtripper.go new file mode 100644 index 000000000..bb0290632 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/manifestclient/read_roundtripper.go @@ -0,0 +1,158 @@ +package manifestclient + +import ( + "bytes" + "fmt" + "io" + "io/fs" + "net/http" + "strconv" + "strings" + "time" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" + apirequest 
"k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/features" + "k8s.io/apiserver/pkg/server" + utilfeature "k8s.io/apiserver/pkg/util/feature" +) + +func init() { + // This feature gate is needed to set requestInfo.LabelSelector + utilruntime.Must(utilfeature.DefaultMutableFeatureGate.Set(fmt.Sprintf("%s=true", features.AuthorizeWithSelectors))) +} + +type manifestRoundTripper struct { + sourceFS fs.FS + + // requestInfoResolver is the same type constructed the same way as the kube-apiserver + requestInfoResolver *apirequest.RequestInfoFactory + + discoveryReader *discoveryReader +} + +func newReadRoundTripper(content fs.FS, discoveryRoundTripper *discoveryReader) *manifestRoundTripper { + return &manifestRoundTripper{ + sourceFS: content, + requestInfoResolver: server.NewRequestInfoResolver(&server.Config{ + LegacyAPIGroupPrefixes: sets.NewString(server.DefaultLegacyAPIPrefix), + }), + discoveryReader: discoveryRoundTripper, + } +} + +// RoundTrip will allow performing read requests very similar to a kube-apiserver against a must-gather style directory. +// Only GETs. +// no watches. 
(maybe add watches +func (mrt *manifestRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + requestInfo, err := mrt.requestInfoResolver.NewRequestInfo(req) + if err != nil { + return nil, fmt.Errorf("failed reading requestInfo: %w", err) + } + + isDiscovery := isServerGroupResourceDiscovery(requestInfo.Path) + if !requestInfo.IsResourceRequest && !isDiscovery { + return nil, fmt.Errorf("non-resource requests are not supported by this implementation: %q", requestInfo.Path) + } + if len(requestInfo.Subresource) != 0 { + return nil, fmt.Errorf("subresource %v is not supported by this implementation", requestInfo.Subresource) + } + if isDiscovery && requestInfo.Verb != "get" { + // TODO handle group resource discovery + return nil, fmt.Errorf("group resource discovery is not supported unless it is a GET request") + } + + var returnBody []byte + var returnErr error + switch requestInfo.Verb { + case "get": + if isDiscovery { + returnBody, returnErr = mrt.discoveryReader.getGroupResourceDiscovery(requestInfo) + } else { + // TODO handle label and field selectors because single item lists are GETs + returnBody, returnErr = mrt.get(requestInfo) + } + case "list": + // TODO handle label and field selectors + returnBody, returnErr = mrt.list(requestInfo) + + case "watch": + // our watches do nothing. We keep the connection alive (I think), but nothing else. 
+ timeoutSecondsString := req.URL.Query().Get("timeoutSeconds") + timeoutDuration := 10 * time.Minute + if len(timeoutSecondsString) > 0 { + currSeconds, err := strconv.ParseInt(timeoutSecondsString, 10, 32) + if err != nil { + returnErr = err + break + } + timeoutDuration = time.Duration(currSeconds) * time.Second + } + resp := &http.Response{} + resp.StatusCode = http.StatusOK + resp.Status = http.StatusText(resp.StatusCode) + resp.Body = newDelayedNothingReader(timeoutDuration) + return resp, nil + + default: + return nil, fmt.Errorf("verb %v is not supported by this implementation", requestInfo.Verb) + } + + resp := &http.Response{ + Header: map[string][]string{}, + } + switch { + case apierrors.IsNotFound(returnErr): + resp.StatusCode = http.StatusNotFound + resp.Status = http.StatusText(resp.StatusCode) + resp.Body = io.NopCloser(bytes.NewBufferString(returnErr.Error())) + case returnErr != nil: + resp.StatusCode = http.StatusInternalServerError + resp.Status = http.StatusText(resp.StatusCode) + resp.Body = io.NopCloser(bytes.NewBufferString(returnErr.Error())) + default: + resp.StatusCode = http.StatusOK + resp.Status = http.StatusText(resp.StatusCode) + resp.Body = io.NopCloser(bytes.NewReader(returnBody)) + // We always return application/json. Avoid clients expecting proto for built-ins. + resp.Header = make(http.Header) + if isDiscovery { + resp.Header.Set("Content-Type", "application/json;as=APIGroupDiscoveryList;v=v2;g=apidiscovery.k8s.io") + } else { + resp.Header.Set("Content-Type", "application/json") + } + } + + return resp, nil +} + +func newNotFound(requestInfo *apirequest.RequestInfo) error { + return apierrors.NewNotFound(schema.GroupResource{ + Group: requestInfo.APIGroup, + Resource: requestInfo.Resource, + }, requestInfo.Name) +} + +// checking for /apis// +// In this case we will return the list of resources for the group. +func isServerGroupResourceDiscovery(path string) bool { + // Corev1 is a special case. 
+ if path == "/api/v1" { + return true + } + if path == "/api" { + return true + } + if path == "/apis" { + return true + } + + parts := strings.Split(path, "/") + if len(parts) != 4 { + return false + } + return parts[0] == "" && parts[1] == "apis" +} diff --git a/vendor/github.com/openshift/library-go/pkg/manifestclient/readwrite_roundtripper.go b/vendor/github.com/openshift/library-go/pkg/manifestclient/readwrite_roundtripper.go new file mode 100644 index 000000000..0e517c47e --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/manifestclient/readwrite_roundtripper.go @@ -0,0 +1,118 @@ +package manifestclient + +import ( + "bytes" + "fmt" + "io" + "io/fs" + "net/http" + "os" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" +) + +// RecommendedRESTConfig is meant to be paired with the HTTPClients below +func RecommendedRESTConfig() *rest.Config { + return &rest.Config{ + QPS: 1000, + Burst: 10000, + ContentConfig: rest.ContentConfig{ + ContentType: runtime.ContentTypeJSON, + }, + } +} + +// RecommendedKubernetesWithClient kubernetes client to be used with the HTTPClients below +func RecommendedKubernetesWithClient(httpClient *http.Client) (*kubernetes.Clientset, error) { + return kubernetes.NewForConfigAndClient(RecommendedRESTConfig(), httpClient) +} + +// Enter here and call `NewForConfigAndClient(manifestclient.RecommendedRESTConfig(), httpClient)` +// For Kubernetes built in clients, use `manifestclient.RecommendedKubernetesWithClient(httpClient)` +func NewHTTPClient(mustGatherDir string) MutationTrackingClient { + mutationTrackingRoundTripper := newReadWriteRoundTripper(os.DirFS(mustGatherDir)) + return &mutationTrackingClient{ + httpClient: &http.Client{ + Transport: mutationTrackingRoundTripper, + }, + mutationTrackingRoundTripper: mutationTrackingRoundTripper, + } +} + +// Enter here and call `NewForConfigAndClient(manifestclient.RecommendedRESTConfig(), httpClient)` +// For Kubernetes built in 
clients, use `manifestclient.RecommendedKubernetesWithClient(httpClient)` +func NewTestingHTTPClient(embedFS fs.FS) MutationTrackingClient { + mutationTrackingRoundTripper := newReadWriteRoundTripper(embedFS) + return &mutationTrackingClient{ + httpClient: &http.Client{ + Transport: mutationTrackingRoundTripper, + }, + mutationTrackingRoundTripper: mutationTrackingRoundTripper, + } +} + +func NewTestingRoundTripper(embedFS fs.FS) *readWriteRoundTripper { + return newReadWriteRoundTripper(embedFS) +} + +func NewRoundTripper(mustGatherDir string) *readWriteRoundTripper { + return newReadWriteRoundTripper(os.DirFS(mustGatherDir)) +} + +func newReadWriteRoundTripper(sourceFS fs.FS) *readWriteRoundTripper { + rt := &readWriteRoundTripper{} + discoveryReader := newDiscoveryReader(sourceFS) + rt.readDelegate = newReadRoundTripper(sourceFS, discoveryReader) + rt.writeDelegate = newWriteRoundTripper(discoveryReader) + return rt +} + +type readWriteRoundTripper struct { + readDelegate *manifestRoundTripper + writeDelegate *writeTrackingRoundTripper +} + +type MutationTrackingRoundTripper interface { + http.RoundTripper + GetMutations() *AllActionsTracker[TrackedSerializedRequest] +} + +type mutationTrackingClient struct { + httpClient *http.Client + + mutationTrackingRoundTripper MutationTrackingRoundTripper +} + +func (m mutationTrackingClient) GetHTTPClient() *http.Client { + return m.httpClient +} + +func (m mutationTrackingClient) GetMutations() *AllActionsTracker[TrackedSerializedRequest] { + return m.mutationTrackingRoundTripper.GetMutations() +} + +type MutationTrackingClient interface { + GetHTTPClient() *http.Client + GetMutations() *AllActionsTracker[TrackedSerializedRequest] +} + +func (rt *readWriteRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + switch req.Method { + case "GET", "HEAD": + return rt.readDelegate.RoundTrip(req) + case "POST", "PUT", "PATCH", "DELETE": + return rt.writeDelegate.RoundTrip(req) + default: + resp := 
&http.Response{} + resp.StatusCode = http.StatusInternalServerError + resp.Status = http.StatusText(resp.StatusCode) + resp.Body = io.NopCloser(bytes.NewBufferString(fmt.Sprintf("unhandled verb: %q", req.Method))) + return resp, nil + } +} + +func (rt *readWriteRoundTripper) GetMutations() *AllActionsTracker[TrackedSerializedRequest] { + return rt.writeDelegate.GetMutations() +} diff --git a/vendor/github.com/openshift/library-go/pkg/manifestclient/serialized_request.go b/vendor/github.com/openshift/library-go/pkg/manifestclient/serialized_request.go new file mode 100644 index 000000000..7023ed356 --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/manifestclient/serialized_request.go @@ -0,0 +1,371 @@ +package manifestclient + +import ( + "bytes" + "crypto/sha256" + "fmt" + "path/filepath" + "strings" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type SerializedRequestish interface { + GetSerializedRequest() *SerializedRequest + SuggestedFilenames() (string, string, string) + DeepCopy() SerializedRequestish +} + +type FileOriginatedSerializedRequest struct { + MetadataFilename string + BodyFilename string + OptionsFilename string + + SerializedRequest SerializedRequest +} + +type TrackedSerializedRequest struct { + RequestNumber int + + SerializedRequest SerializedRequest +} + +type SerializedRequest struct { + ActionMetadata + KindType schema.GroupVersionKind + + Options []byte + Body []byte +} + +func RequestsForResource[S ~[]E, E SerializedRequestish](mutations S, metadata ActionMetadata) []SerializedRequestish { + ret := []SerializedRequestish{} + for _, mutation := range mutations { + if mutation.GetSerializedRequest().GetLookupMetadata() == metadata { + ret = append(ret, mutation) + } + } + return ret +} + +// Difference returns a set of objects that are not in s2. 
+// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func DifferenceOfSerializedRequests[S ~[]E, E SerializedRequestish, T ~[]F, F SerializedRequestish](lhs S, rhs T) S { + ret := S{} + + for i, currLHS := range lhs { + found := false + for _, currRHS := range rhs { + if EquivalentSerializedRequests(currLHS, currRHS) { + found = true + break + } + } + if !found { + ret = append(ret, lhs[i]) + } + } + return ret +} + +func AreAllSerializedRequestsEquivalent[S ~[]E, E SerializedRequestish, T ~[]F, F SerializedRequestish](lhs S, rhs T) bool { + if len(DifferenceOfSerializedRequests(lhs, rhs)) != 0 { + return false + } + if len(DifferenceOfSerializedRequests(rhs, lhs)) != 0 { + return false + } + return true +} + +func AreAllSerializedRequestsEquivalentWithReasons[S ~[]E, E SerializedRequestish, T ~[]F, F SerializedRequestish](lhs S, rhs T) (bool, []SerializedRequest, []SerializedRequest) { + missingInRHS := DifferenceOfSerializedRequests(lhs, rhs) + missingInLHS := DifferenceOfSerializedRequests(rhs, lhs) + + if len(missingInRHS) == 0 && len(missingInLHS) == 0 { + return true, nil, nil + } + + missingInRHSAsSerializedRequest := []SerializedRequest{} + missingInLHSAsSerializedRequest := []SerializedRequest{} + for _, curr := range missingInRHS { + missingInRHSAsSerializedRequest = append(missingInRHSAsSerializedRequest, *curr.GetSerializedRequest()) + } + for _, curr := range missingInLHS { + missingInLHSAsSerializedRequest = append(missingInLHSAsSerializedRequest, *curr.GetSerializedRequest()) + } + + return false, missingInRHSAsSerializedRequest, missingInLHSAsSerializedRequest +} + +func EquivalentSerializedRequests(lhs, rhs SerializedRequestish) bool { + return lhs.GetSerializedRequest().Equals(rhs.GetSerializedRequest()) +} + +func MakeFilenameGoModSafe(in string) string { + // go mod doesn't like colons, so rename those. 
We might theoretically conflict, but we shouldn't practically do so often + return strings.Replace(in, ":", "-COLON-", -1) +} + +func (lhs *FileOriginatedSerializedRequest) Equals(rhs *FileOriginatedSerializedRequest) bool { + return CompareFileOriginatedSerializedRequest(lhs, rhs) == 0 +} + +func CompareFileOriginatedSerializedRequest(lhs, rhs *FileOriginatedSerializedRequest) int { + switch { + case lhs == nil && rhs == nil: + return 0 + case lhs == nil && rhs != nil: + return 1 + case lhs != nil && rhs == nil: + return -1 + } + + if cmp := CompareSerializedRequest(&lhs.SerializedRequest, &rhs.SerializedRequest); cmp != 0 { + return cmp + } + if cmp := strings.Compare(lhs.MetadataFilename, rhs.MetadataFilename); cmp != 0 { + return cmp + } + if cmp := strings.Compare(lhs.BodyFilename, rhs.BodyFilename); cmp != 0 { + return cmp + } + if cmp := strings.Compare(lhs.OptionsFilename, rhs.OptionsFilename); cmp != 0 { + return cmp + } + + return 0 +} + +func (lhs *TrackedSerializedRequest) Equals(rhs *TrackedSerializedRequest) bool { + return CompareTrackedSerializedRequest(lhs, rhs) == 0 +} + +func CompareTrackedSerializedRequest(lhs, rhs *TrackedSerializedRequest) int { + switch { + case lhs == nil && rhs == nil: + return 0 + case lhs == nil && rhs != nil: + return 1 + case lhs != nil && rhs == nil: + return -1 + } + + if lhs.RequestNumber < rhs.RequestNumber { + return -1 + } else if lhs.RequestNumber > rhs.RequestNumber { + return 1 + } + + return CompareSerializedRequest(&lhs.SerializedRequest, &rhs.SerializedRequest) +} + +func (lhs *SerializedRequest) Equals(rhs *SerializedRequest) bool { + return CompareSerializedRequest(lhs, rhs) == 0 +} + +func CompareSerializedRequest(lhs, rhs *SerializedRequest) int { + switch { + case lhs == nil && rhs == nil: + return 0 + case lhs == nil && rhs != nil: + return 1 + case lhs != nil && rhs == nil: + return -1 + } + + if cmp := strings.Compare(string(lhs.Action), string(rhs.Action)); cmp != 0 { + return cmp + } + if cmp := 
strings.Compare(lhs.PatchType, rhs.PatchType); cmp != 0 { + return cmp + } + if cmp := strings.Compare(lhs.FieldManager, rhs.FieldManager); cmp != 0 { + return cmp + } + if cmp := strings.Compare(lhs.ControllerInstanceName, rhs.ControllerInstanceName); cmp != 0 { + return cmp + } + + if cmp := strings.Compare(lhs.ResourceType.Group, rhs.ResourceType.Group); cmp != 0 { + return cmp + } + if cmp := strings.Compare(lhs.ResourceType.Version, rhs.ResourceType.Version); cmp != 0 { + return cmp + } + if cmp := strings.Compare(lhs.ResourceType.Resource, rhs.ResourceType.Resource); cmp != 0 { + return cmp + } + + if cmp := strings.Compare(lhs.KindType.Group, rhs.KindType.Group); cmp != 0 { + return cmp + } + if cmp := strings.Compare(lhs.KindType.Version, rhs.KindType.Version); cmp != 0 { + return cmp + } + if cmp := strings.Compare(lhs.KindType.Kind, rhs.KindType.Kind); cmp != 0 { + return cmp + } + + if cmp := strings.Compare(lhs.Namespace, rhs.Namespace); cmp != 0 { + return cmp + } + if cmp := strings.Compare(lhs.Name, rhs.Name); cmp != 0 { + return cmp + } + if cmp := strings.Compare(lhs.GenerateName, rhs.GenerateName); cmp != 0 { + return cmp + } + + if cmp := bytes.Compare(lhs.Body, rhs.Body); cmp != 0 { + return cmp + } + if cmp := bytes.Compare(lhs.Options, rhs.Options); cmp != 0 { + return cmp + } + + return 0 +} + +func (a FileOriginatedSerializedRequest) GetSerializedRequest() *SerializedRequest { + return &a.SerializedRequest +} + +func (a TrackedSerializedRequest) GetSerializedRequest() *SerializedRequest { + return &a.SerializedRequest +} + +func (a SerializedRequest) GetSerializedRequest() *SerializedRequest { + return &a +} + +func (a FileOriginatedSerializedRequest) SuggestedFilenames() (string, string, string) { + return a.MetadataFilename, a.BodyFilename, a.OptionsFilename +} + +func (a TrackedSerializedRequest) SuggestedFilenames() (string, string, string) { + return suggestedFilenames(a.SerializedRequest) +} + +func (a SerializedRequest) 
SuggestedFilenames() (string, string, string) { + return suggestedFilenames(a) +} + +func suggestedFilenames(a SerializedRequest) (string, string, string) { + bodyHash := hashRequestToPrefix(a.Body, a.Options) + + groupName := a.ResourceType.Group + if len(groupName) == 0 { + groupName = "core" + } + + scopingString := "" + if len(a.Namespace) > 0 { + scopingString = filepath.Join("namespaces", a.Namespace) + } else { + scopingString = filepath.Join("cluster-scoped-resources") + } + + metadataFilename := MakeFilenameGoModSafe( + filepath.Join( + string(a.Action), + scopingString, + groupName, + a.ResourceType.Resource, + fmt.Sprintf("%s-metadata-%s%s.yaml", bodyHash, a.Name, a.GenerateName), + ), + ) + bodyFilename := MakeFilenameGoModSafe( + filepath.Join( + string(a.Action), + scopingString, + groupName, + a.ResourceType.Resource, + fmt.Sprintf("%s-body-%s%s.yaml", bodyHash, a.Name, a.GenerateName), + ), + ) + optionsFilename := "" + if len(a.Options) > 0 { + optionsFilename = MakeFilenameGoModSafe( + filepath.Join( + string(a.Action), + scopingString, + groupName, + a.ResourceType.Resource, + fmt.Sprintf("%s-options-%s%s.yaml", bodyHash, a.Name, a.GenerateName), + ), + ) + } + return metadataFilename, bodyFilename, optionsFilename +} + +func hashRequestToPrefix(data, options []byte) string { + switch { + case len(data) > 0: + return hashForFilenamePrefix(data) + case len(options) > 0: + return hashForFilenamePrefix(options) + default: + return "MISSING" + } +} + +func hashForFilenamePrefix(data []byte) string { + if len(data) == 0 { + return "MISSING" + } + hash := sha256.New() + hash.Write(data) + hashBytes := hash.Sum(nil) + + // we're looking to deconflict filenames, not protect the crown jewels + return fmt.Sprintf("%x", hashBytes[len(hashBytes)-2:]) +} + +func (a FileOriginatedSerializedRequest) DeepCopy() SerializedRequestish { + return FileOriginatedSerializedRequest{ + MetadataFilename: a.MetadataFilename, + BodyFilename: a.BodyFilename, + 
OptionsFilename: a.OptionsFilename, + SerializedRequest: a.SerializedRequest.DeepCopy().(SerializedRequest), + } +} + +func (a TrackedSerializedRequest) DeepCopy() SerializedRequestish { + return TrackedSerializedRequest{ + RequestNumber: a.RequestNumber, + SerializedRequest: a.SerializedRequest.DeepCopy().(SerializedRequest), + } +} + +func (a SerializedRequest) DeepCopy() SerializedRequestish { + return SerializedRequest{ + ActionMetadata: ActionMetadata{ + Action: a.Action, + ResourceMetadata: ResourceMetadata{ + ResourceType: a.ResourceType, + Namespace: a.Namespace, + Name: a.Name, + GenerateName: a.GenerateName, + }, + PatchType: a.PatchType, + FieldManager: a.FieldManager, + ControllerInstanceName: a.ControllerInstanceName, + }, + KindType: a.KindType, + Options: bytes.Clone(a.Options), + Body: bytes.Clone(a.Body), + } +} + +func (a SerializedRequest) StringID() string { + return fmt.Sprintf("%s-%s.%s.%s/%s%s[%s]", a.Action, a.KindType.Kind, a.KindType.Version, a.KindType.Group, a.Name, a.GenerateName, a.Namespace) +} + +func (a SerializedRequest) GetLookupMetadata() ActionMetadata { + return a.ActionMetadata +} diff --git a/vendor/github.com/openshift/library-go/pkg/manifestclient/write_roundtripper.go b/vendor/github.com/openshift/library-go/pkg/manifestclient/write_roundtripper.go new file mode 100644 index 000000000..fcdc3cd0a --- /dev/null +++ b/vendor/github.com/openshift/library-go/pkg/manifestclient/write_roundtripper.go @@ -0,0 +1,276 @@ +package manifestclient + +import ( + "bytes" + "fmt" + "io" + "net/http" + "strings" + "sync" + + metainternalversionscheme "k8s.io/apimachinery/pkg/apis/meta/internalversion/scheme" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/sets" + apirequest 
"k8s.io/apiserver/pkg/endpoints/request" + "k8s.io/apiserver/pkg/server" + "sigs.k8s.io/yaml" +) + +var validHeaders = sets.New( + runtime.ContentTypeJSON, + string(types.ApplyPatchType), + string(types.JSONPatchType), + string(types.MergePatchType), + string(types.StrategicMergePatchType), +) + +// Saves all mutations for later serialization and/or inspection. +// In the case of updating the same thing multiple times, all mutations are stored and it's up to the caller to decide +// what to do. +type writeTrackingRoundTripper struct { + // requestInfoResolver is the same type constructed the same way as the kube-apiserver + requestInfoResolver *apirequest.RequestInfoFactory + + discoveryReader *discoveryReader + + lock sync.RWMutex + nextRequestNumber int + actionTracker *AllActionsTracker[TrackedSerializedRequest] +} + +func newWriteRoundTripper(discoveryRoundTripper *discoveryReader) *writeTrackingRoundTripper { + return &writeTrackingRoundTripper{ + nextRequestNumber: 1, + actionTracker: &AllActionsTracker[TrackedSerializedRequest]{}, + requestInfoResolver: server.NewRequestInfoResolver(&server.Config{ + LegacyAPIGroupPrefixes: sets.NewString(server.DefaultLegacyAPIPrefix), + }), + discoveryReader: discoveryRoundTripper, + } +} + +func (mrt *writeTrackingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + resp := &http.Response{} + + retJSONBytes, err := mrt.roundTrip(req) + if err != nil { + resp.StatusCode = http.StatusInternalServerError + resp.Status = http.StatusText(resp.StatusCode) + resp.Body = io.NopCloser(bytes.NewBufferString(err.Error())) + return resp, nil + } + + resp.StatusCode = http.StatusOK + resp.Status = http.StatusText(resp.StatusCode) + resp.Body = io.NopCloser(bytes.NewReader(retJSONBytes)) + // We always return application/json. Avoid clients expecting proto for built-ins. + // this may or may not work for apply. Guess we'll find out. 
+ resp.Header = make(http.Header) + resp.Header.Set("Content-Type", "application/json") + + return resp, nil +} + +func (mrt *writeTrackingRoundTripper) roundTrip(req *http.Request) ([]byte, error) { + requestInfo, err := mrt.requestInfoResolver.NewRequestInfo(req) + if err != nil { + return nil, fmt.Errorf("failed reading requestInfo: %w", err) + } + + if !requestInfo.IsResourceRequest { + return nil, fmt.Errorf("non-resource requests are not supported by this implementation") + } + if len(requestInfo.Subresource) != 0 && requestInfo.Subresource != "status" { + return nil, fmt.Errorf("subresource %v is not supported by this implementation", requestInfo.Subresource) + } + if contentType := req.Header.Get("Content-Type"); !validHeaders.Has(contentType) { + return nil, fmt.Errorf("incorrect Content-Type header, expected one of: [%s] but got: %s", strings.Join(validHeaders.UnsortedList(), ", "), contentType) + } + + patchType := "" + var action Action + switch { + case requestInfo.Verb == "create" && len(requestInfo.Subresource) == 0: + action = ActionCreate + case requestInfo.Verb == "update" && len(requestInfo.Subresource) == 0: + action = ActionUpdate + case requestInfo.Verb == "update" && requestInfo.Subresource == "status": + action = ActionUpdateStatus + case requestInfo.Verb == "patch" && req.Header.Get("Content-Type") == string(types.ApplyPatchType) && len(requestInfo.Subresource) == 0: + action = ActionApply + case requestInfo.Verb == "patch" && req.Header.Get("Content-Type") == string(types.ApplyPatchType) && requestInfo.Subresource == "status": + action = ActionApplyStatus + case requestInfo.Verb == "patch" && len(requestInfo.Subresource) == 0: + action = ActionPatch + patchType = req.Header.Get("Content-Type") + case requestInfo.Verb == "patch" && requestInfo.Subresource == "status": + action = ActionPatchStatus + patchType = req.Header.Get("Content-Type") + case requestInfo.Verb == "delete" && len(requestInfo.Subresource) == 0: + action = ActionDelete + 
default:
+		return nil, fmt.Errorf("verb %v is not supported by this implementation", requestInfo.Verb)
+	}
+
+	var opts runtime.Object
+	switch action {
+	case ActionPatch, ActionPatchStatus:
+		opts = &metav1.PatchOptions{}
+	case ActionApply, ActionApplyStatus:
+		opts = &metav1.PatchOptions{}
+	case ActionUpdate, ActionUpdateStatus:
+		opts = &metav1.UpdateOptions{}
+	case ActionCreate:
+		opts = &metav1.CreateOptions{}
+	case ActionDelete:
+		opts = &metav1.DeleteOptions{}
+	}
+	if err := metainternalversionscheme.ParameterCodec.DecodeParameters(req.URL.Query(), metav1.SchemeGroupVersion, opts); err != nil {
+		return nil, fmt.Errorf("unable to parse query parameters: %w", err)
+	}
+
+	optionsBytes, err := yaml.Marshal(opts)
+	if err != nil {
+		return nil, fmt.Errorf("unable to encode options: %w", err)
+	}
+	if strings.TrimSpace(string(optionsBytes)) == "{}" {
+		optionsBytes = nil
+	}
+
+	bodyContent := []byte{}
+	if req.Body != nil {
+		bodyContent, err = io.ReadAll(req.Body)
+		if err != nil {
+			return nil, fmt.Errorf("failed to read body: %w", err)
+		}
+	}
+
+	var bodyObj runtime.Object
+	actionHasRuntimeObjectBody := action != ActionPatch && action != ActionPatchStatus
+	if actionHasRuntimeObjectBody {
+		bodyObj, err = runtime.Decode(unstructured.UnstructuredJSONScheme, bodyContent)
+		if err != nil {
+			return nil, fmt.Errorf("unable to decode body: %w", err)
+		}
+		if requestInfo.Namespace != bodyObj.(*unstructured.Unstructured).GetNamespace() {
+			return nil, fmt.Errorf("request namespace %q does not equal body namespace %q", requestInfo.Namespace, bodyObj.(*unstructured.Unstructured).GetNamespace())
+		}
+		if action != ActionCreate && action != ActionDelete && requestInfo.Name != bodyObj.(*unstructured.Unstructured).GetName() {
+			return nil, fmt.Errorf("request name %q does not equal body name %q", requestInfo.Name, bodyObj.(*unstructured.Unstructured).GetName())
+		}
+	}
+
+	gvr := schema.GroupVersionResource{
+		Group:    requestInfo.APIGroup,
+		Version:  
requestInfo.APIVersion, + Resource: requestInfo.Resource, + } + metadataName := requestInfo.Name + if action == ActionCreate { + // in this case, the name isn't in the URL, it's in the body + metadataName = bodyObj.(*unstructured.Unstructured).GetName() + } + + bodyOutputBytes := bodyContent + generatedName := "" + kindType := schema.GroupVersionKind{} + if actionHasRuntimeObjectBody { + bodyOutputBytes, err = yaml.Marshal(bodyObj.(*unstructured.Unstructured).Object) + if err != nil { + return nil, fmt.Errorf("unable to encode body: %w", err) + } + generatedName = bodyObj.(*unstructured.Unstructured).GetGenerateName() + kindType = bodyObj.GetObjectKind().GroupVersionKind() + } else if (action == ActionPatch || action == ActionPatchStatus) && patchType == string(types.JSONPatchType) { + // the following code gives nice formatting for + // JSON patches that will be stored in files. + var jsonPatchOperations []map[string]interface{} + err = json.Unmarshal(bodyOutputBytes, &jsonPatchOperations) + if err != nil { + return nil, fmt.Errorf("unable to decode JSONPatch body: %w", err) + } + bodyOutputBytes, err = yaml.Marshal(jsonPatchOperations) + if err != nil { + return nil, fmt.Errorf("unable to encode JSONPatch body: %w", err) + } + } + + fieldManagerName := "" + if patchOptions, ok := opts.(*metav1.PatchOptions); ok { + fieldManagerName = patchOptions.FieldManager + } + + serializedRequest := SerializedRequest{ + ActionMetadata: ActionMetadata{ + Action: action, + ResourceMetadata: ResourceMetadata{ + ResourceType: gvr, + Namespace: requestInfo.Namespace, + Name: metadataName, + GenerateName: generatedName, + }, + PatchType: patchType, + FieldManager: fieldManagerName, + ControllerInstanceName: ControllerInstanceNameFromContext(req.Context()), + }, + KindType: kindType, + Options: optionsBytes, + Body: bodyOutputBytes, + } + + // this lock also protects the access to actionTracker + mrt.lock.Lock() + defer mrt.lock.Unlock() + trackedRequest := 
TrackedSerializedRequest{ + RequestNumber: mrt.nextRequestNumber, + SerializedRequest: serializedRequest, + } + mrt.nextRequestNumber++ + + mrt.actionTracker.AddRequest(trackedRequest) + + // returning a value that will probably not cause the wrapping client to fail, but isn't very useful. + // this keeps calling code from depending on the return value. + ret := &unstructured.Unstructured{Object: map[string]interface{}{}} + ret.SetName(serializedRequest.ActionMetadata.Name) + ret.SetNamespace(serializedRequest.ActionMetadata.Namespace) + if actionHasRuntimeObjectBody { + ret.SetGroupVersionKind(bodyObj.GetObjectKind().GroupVersionKind()) + } else { + kindForResource, err := mrt.discoveryReader.getKindForResource(gvr) + if err != nil { + return nil, err + } + ret.SetGroupVersionKind(kindForResource.kind) + } + retBytes, err := json.Marshal(ret.Object) + if err != nil { + return nil, fmt.Errorf("unable to encode body: %w", err) + } + return retBytes, nil +} + +func (mrt *writeTrackingRoundTripper) GetMutations() *AllActionsTracker[TrackedSerializedRequest] { + mrt.lock.Lock() + defer mrt.lock.Unlock() + + return mrt.actionTracker.DeepCopy() +} + +func setAnnotationFor(obj *unstructured.Unstructured, key, value string) { + if obj == nil { + return + } + + annotations := obj.GetAnnotations() + if annotations == nil { + annotations = map[string]string{} + } + annotations[key] = value + obj.SetAnnotations(annotations) +} diff --git a/vendor/github.com/openshift/multi-operator-manager/LICENSE b/vendor/github.com/openshift/multi-operator-manager/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/openshift/multi-operator-manager/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryinputresources/command.go b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryinputresources/command.go new file mode 100644 index 000000000..20772468c --- /dev/null +++ b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryinputresources/command.go @@ -0,0 +1,78 @@ +package libraryinputresources + +import ( + "context" + "github.com/openshift/multi-operator-manager/pkg/library/libraryoutputresources" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "k8s.io/cli-runtime/pkg/genericiooptions" +) + +type InputResourcesFunc func(ctx context.Context) (*InputResources, error) + +func NewInputResourcesCommand(inputResourcesFn InputResourcesFunc, outputResourcesFn libraryoutputresources.OutputResourcesFunc, streams genericiooptions.IOStreams) *cobra.Command { + return newInputResourcesCommand(inputResourcesFn, outputResourcesFn, streams) +} + +type inputResourcesFlags struct { + inputResourcesFn InputResourcesFunc + outputResourcesFn libraryoutputresources.OutputResourcesFunc + + streams genericiooptions.IOStreams +} + +func newInputResourcesFlags(streams genericiooptions.IOStreams) *inputResourcesFlags { + return &inputResourcesFlags{ + streams: streams, + } +} + +func newInputResourcesCommand(inputResourcesFn InputResourcesFunc, outputResourcesFn libraryoutputresources.OutputResourcesFunc, streams genericiooptions.IOStreams) *cobra.Command { + f := newInputResourcesFlags(streams) + f.inputResourcesFn = inputResourcesFn + f.outputResourcesFn = outputResourcesFn + + cmd := &cobra.Command{ + Use: "input-resources", + Short: "List of resources that this operator expects as inputs and the type of cluster those modifications should be applied to.", + + SilenceUsage: true, + SilenceErrors: true, + RunE: func(cmd *cobra.Command, args []string) error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + if err := f.Validate(); 
err != nil { + return err + } + o, err := f.ToOptions(ctx) + if err != nil { + return err + } + if err := o.Run(ctx); err != nil { + return err + } + return nil + }, + } + + f.BindFlags(cmd.Flags()) + + return cmd +} + +func (f *inputResourcesFlags) BindFlags(flags *pflag.FlagSet) { +} + +func (f *inputResourcesFlags) Validate() error { + return nil +} + +func (f *inputResourcesFlags) ToOptions(ctx context.Context) (*inputResourcesOptions, error) { + return newInputResourcesOptions( + f.inputResourcesFn, + f.outputResourcesFn, + f.streams, + ), + nil +} diff --git a/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryinputresources/easy_creation.go b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryinputresources/easy_creation.go new file mode 100644 index 000000000..b461c293b --- /dev/null +++ b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryinputresources/easy_creation.go @@ -0,0 +1,73 @@ +package libraryinputresources + +func ExactResource(group, version, resource, namespace, name string) ExactResourceID { + return ExactResourceID{ + InputResourceTypeIdentifier: InputResourceTypeIdentifier{ + Group: group, + Version: version, + Resource: resource, + }, + Namespace: namespace, + Name: name, + } +} + +func ExactSecret(namespace, name string) ExactResourceID { + return ExactResource("", "v1", "secrets", namespace, name) +} + +func ExactConfigMap(namespace, name string) ExactResourceID { + return ExactResource("", "v1", "configmaps", namespace, name) +} + +func ExactNamespace(name string) ExactResourceID { + return ExactResource("", "v1", "namespaces", "", name) +} + +func ExactServiceAccount(namespace, name string) ExactResourceID { + return ExactResource("", "v1", "serviceaccounts", namespace, name) +} + +func ExactDeployment(namespace, name string) ExactResourceID { + return ExactResource("apps", "v1", "deployments", namespace, name) +} + +func ExactDaemonSet(namespace, name string) ExactResourceID { + 
return ExactResource("apps", "v1", "daemonsets", namespace, name) +} + +func ExactClusterOperator(name string) ExactResourceID { + return ExactResource("config.openshift.io", "v1", "clusteroperators", "", name) +} + +func ExactLowLevelOperator(resource string) ExactResourceID { + return ExactResource("operator.openshift.io", "v1", resource, "", "cluster") +} + +func ExactClusterRole(name string) ExactResourceID { + return ExactResource("rbac.authorization.k8s.io", "v1", "clusterroles", "", name) +} + +func ExactClusterRoleBinding(name string) ExactResourceID { + return ExactResource("rbac.authorization.k8s.io", "v1", "clusterrolebindings", "", name) +} + +func ExactRole(namespace, name string) ExactResourceID { + return ExactResource("rbac.authorization.k8s.io", "v1", "roles", namespace, name) +} + +func ExactRoleBinding(namespace, name string) ExactResourceID { + return ExactResource("rbac.authorization.k8s.io", "v1", "rolebindings", namespace, name) +} + +func ExactConfigResource(resource string) ExactResourceID { + return ExactResource("config.openshift.io", "v1", resource, "", "cluster") +} + +func SecretIdentifierType() InputResourceTypeIdentifier { + return InputResourceTypeIdentifier{ + Group: "", + Version: "v1", + Resource: "secrets", + } +} diff --git a/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryinputresources/options.go b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryinputresources/options.go new file mode 100644 index 000000000..c709c278d --- /dev/null +++ b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryinputresources/options.go @@ -0,0 +1,86 @@ +package libraryinputresources + +import ( + "context" + "errors" + "fmt" + "github.com/openshift/multi-operator-manager/pkg/library/libraryoutputresources" + + "k8s.io/cli-runtime/pkg/genericiooptions" + "sigs.k8s.io/yaml" +) + +type inputResourcesOptions struct { + inputResourcesFn InputResourcesFunc + outputResourcesFn 
libraryoutputresources.OutputResourcesFunc + + streams genericiooptions.IOStreams +} + +func newInputResourcesOptions(inputResourcesFn InputResourcesFunc, outputResourcesFn libraryoutputresources.OutputResourcesFunc, streams genericiooptions.IOStreams) *inputResourcesOptions { + return &inputResourcesOptions{ + inputResourcesFn: inputResourcesFn, + outputResourcesFn: outputResourcesFn, + streams: streams, + } +} + +func (o *inputResourcesOptions) Run(ctx context.Context) error { + errs := []error{} + inputResources, err := o.inputResourcesFn(ctx) + if err != nil { + errs = append(errs, fmt.Errorf("failed generating input resources: %w", err)) + } + outputResources, err := o.outputResourcesFn(ctx) + if err != nil { + errs = append(errs, fmt.Errorf("failed generating input resources: %w", err)) + } + convertedResources := convertOutputToInput(outputResources) + inputResources.ApplyConfigurationResources.ExactResources = append(inputResources.ApplyConfigurationResources.ExactResources, convertedResources.ApplyConfigurationResources.ExactResources...) + inputResources.ApplyConfigurationResources.GeneratedNameResources = append(inputResources.ApplyConfigurationResources.GeneratedNameResources, convertedResources.ApplyConfigurationResources.GeneratedNameResources...) + + errs = append(errs, validateInputResources(inputResources)...) + + inputResourcesYAML, err := yaml.Marshal(inputResources) + if err != nil { + errs = append(errs, fmt.Errorf("failed marshalling input resources: %w", err)) + } + + if _, err := fmt.Fprint(o.streams.Out, string(inputResourcesYAML)); err != nil { + errs = append(errs, fmt.Errorf("failed outputing input resources: %w", err)) + } + + return errors.Join(errs...) 
+} + +func convertOutputToInput(outputResources *libraryoutputresources.OutputResources) *InputResources { + inputResources := &InputResources{} + + resourceList := []libraryoutputresources.ResourceList{outputResources.ConfigurationResources} + for _, currResourceList := range resourceList { + for _, curr := range currResourceList.ExactResources { + inputResources.ApplyConfigurationResources.ExactResources = append(inputResources.ApplyConfigurationResources.ExactResources, ExactResourceID{ + InputResourceTypeIdentifier: InputResourceTypeIdentifier{ + Group: curr.Group, + Version: curr.Version, + Resource: curr.Resource, + }, + Namespace: curr.Namespace, + Name: curr.Name, + }) + } + for _, curr := range currResourceList.GeneratedNameResources { + inputResources.ApplyConfigurationResources.GeneratedNameResources = append(inputResources.ApplyConfigurationResources.GeneratedNameResources, GeneratedResourceID{ + InputResourceTypeIdentifier: InputResourceTypeIdentifier{ + Group: curr.Group, + Version: curr.Version, + Resource: curr.Resource, + }, + Namespace: curr.Namespace, + GeneratedName: curr.GeneratedName, + }) + } + } + + return inputResources +} diff --git a/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryinputresources/prune_mustgather.go b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryinputresources/prune_mustgather.go new file mode 100644 index 000000000..922bbed23 --- /dev/null +++ b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryinputresources/prune_mustgather.go @@ -0,0 +1,310 @@ +package libraryinputresources + +import ( + "context" + "errors" + "fmt" + "net/http" + "os" + "path" + + "github.com/PaesslerAG/gval" + "github.com/PaesslerAG/jsonpath" + "github.com/openshift/library-go/pkg/manifestclient" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + 
"k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/client-go/discovery" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/rest" +) + +func WriteRequiredInputResourcesFromMustGather(ctx context.Context, inputResources *InputResources, mustGatherDir, targetDir string) error { + actualResources, err := GetRequiredInputResourcesFromMustGather(ctx, inputResources, mustGatherDir) + if err != nil { + return err + } + + if err := os.MkdirAll(targetDir, 0755); err != nil { + return fmt.Errorf("unable to create %q: %w", targetDir, err) + } + + errs := []error{} + for _, currResource := range actualResources { + if err := WriteResource(currResource, targetDir); err != nil { + errs = append(errs, err) + } + } + + return errors.Join(errs...) +} + +func GetRequiredInputResourcesFromMustGather(ctx context.Context, inputResources *InputResources, mustGatherDir string) ([]*Resource, error) { + dynamicClient, err := NewDynamicClientFromMustGather(mustGatherDir) + if err != nil { + return nil, err + } + + pertinentUnstructureds, err := GetRequiredInputResourcesForResourceList(ctx, inputResources.ApplyConfigurationResources, dynamicClient) + if err != nil { + return nil, err + } + + return unstructuredToMustGatherFormat(pertinentUnstructureds) +} + +func NewDynamicClientFromMustGather(mustGatherDir string) (dynamic.Interface, error) { + httpClient := newHTTPClientFromMustGather(mustGatherDir) + dynamicClient, err := dynamic.NewForConfigAndClient(&rest.Config{}, httpClient) + if err != nil { + return nil, fmt.Errorf("failure creating dynamicClient for NewDynamicClientFromMustGather: %w", err) + } + return dynamicClient, nil +} + +func NewDiscoveryClientFromMustGather(mustGatherDir string) (discovery.AggregatedDiscoveryInterface, error) { + httpClient := newHTTPClientFromMustGather(mustGatherDir) + discoveryClient, err := 
discovery.NewDiscoveryClientForConfigAndClient(manifestclient.RecommendedRESTConfig(), httpClient) + if err != nil { + return nil, fmt.Errorf("failure creating discoveryClient for NewDiscoveryClientFromMustGather: %w", err) + } + return discoveryClient, nil +} + +func newHTTPClientFromMustGather(mustGatherDir string) *http.Client { + roundTripper := manifestclient.NewRoundTripper(mustGatherDir) + return &http.Client{ + Transport: roundTripper, + } +} + +var builder = gval.Full(jsonpath.Language()) + +func GetRequiredInputResourcesForResourceList(ctx context.Context, resourceList ResourceList, dynamicClient dynamic.Interface) ([]*Resource, error) { + instances := NewUniqueResourceSet() + errs := []error{} + + for _, currResource := range resourceList.ExactResources { + resourceInstance, err := getExactResource(ctx, dynamicClient, currResource) + if apierrors.IsNotFound(err) { + continue + } + if err != nil { + errs = append(errs, err) + continue + } + instances.Insert(resourceInstance) + } + + for _, currResource := range resourceList.LabelSelectedResources { + resourceList, err := getResourcesByLabelSelector(ctx, dynamicClient, currResource) + if apierrors.IsNotFound(err) { + continue + } + if err != nil { + errs = append(errs, err) + continue + } + instances.Insert(resourceList...) 
+ } + + path := field.NewPath(".") + for i, currResourceRef := range resourceList.ResourceReferences { + currFieldPath := path.Child("resourceReference").Index(i) + + referringResourceInstance, err := getExactResource(ctx, dynamicClient, currResourceRef.ReferringResource) + if apierrors.IsNotFound(err) { + continue + } + if err != nil { + errs = append(errs, fmt.Errorf("failed reading referringResource [%v] %#v: %w", currFieldPath, currResourceRef.ReferringResource, err)) + continue + } + instances.Insert(referringResourceInstance) + + switch { + case currResourceRef.ImplicitNamespacedReference != nil: + fieldPathEvaluator, err := builder.NewEvaluable(currResourceRef.ImplicitNamespacedReference.NameJSONPath) + if err != nil { + errs = append(errs, fmt.Errorf("error parsing [%v]: %q: %w", currFieldPath, currResourceRef.ImplicitNamespacedReference.NameJSONPath, err)) + continue + } + + results, err := fieldPathEvaluator(ctx, referringResourceInstance.Content.UnstructuredContent()) + if err != nil { + errs = append(errs, fmt.Errorf("unexpected error finding value for %v from %v with jsonPath: %w", currFieldPath, "TODO", err)) + continue + } + + var resultStrings []string + switch cast := results.(type) { + case string: + resultStrings = []string{cast} + case []string: + resultStrings = cast + case []interface{}: + for _, curr := range cast { + resultStrings = append(resultStrings, fmt.Sprintf("%v", curr)) + } + default: + errs = append(errs, fmt.Errorf("[%v] unexpected error type %T for %#v", currFieldPath, results, results)) + } + + for _, targetResourceName := range resultStrings { + targetRef := ExactResourceID{ + InputResourceTypeIdentifier: currResourceRef.ImplicitNamespacedReference.InputResourceTypeIdentifier, + Namespace: currResourceRef.ImplicitNamespacedReference.Namespace, + Name: targetResourceName, + } + + resourceInstance, err := getExactResource(ctx, dynamicClient, targetRef) + if apierrors.IsNotFound(err) { + continue + } + if err != nil { + errs = 
append(errs, err) + continue + } + + instances.Insert(resourceInstance) + } + } + } + + return instances.List(), errors.Join(errs...) +} + +func getExactResource(ctx context.Context, dynamicClient dynamic.Interface, resourceReference ExactResourceID) (*Resource, error) { + gvr := schema.GroupVersionResource{Group: resourceReference.Group, Version: resourceReference.Version, Resource: resourceReference.Resource} + unstructuredInstance, err := dynamicClient.Resource(gvr).Namespace(resourceReference.Namespace).Get(ctx, resourceReference.Name, metav1.GetOptions{}) + if err != nil { + return nil, fmt.Errorf("failed getting %v: %w", IdentifierForExactResourceRef(&resourceReference), err) + } + + resourceInstance := &Resource{ + ResourceType: gvr, + Content: unstructuredInstance, + } + return resourceInstance, nil +} + +func getResourcesByLabelSelector(ctx context.Context, dynamicClient dynamic.Interface, labelSelectedResource LabelSelectedResource) ([]*Resource, error) { + gvr := schema.GroupVersionResource{ + Group: labelSelectedResource.Group, + Version: labelSelectedResource.Version, + Resource: labelSelectedResource.Resource, + } + + selector, err := metav1.LabelSelectorAsSelector(&labelSelectedResource.LabelSelector) + if err != nil { + return nil, err + } + + namespace := labelSelectedResource.Namespace + if namespace == "" { + namespace = metav1.NamespaceAll + } + + unstructuredList, err := dynamicClient.Resource(gvr).Namespace(namespace).List(ctx, metav1.ListOptions{LabelSelector: selector.String()}) + if err != nil { + return nil, fmt.Errorf("failed getting list of resources with labelSelector %q: %w", selector, err) + } + + var resources []*Resource + for _, item := range unstructuredList.Items { + resourceInstance := &Resource{ + ResourceType: gvr, + Content: &item, + } + resources = append(resources, resourceInstance) + } + + return resources, nil +} + +func IdentifierForExactResourceRef(resourceReference *ExactResourceID) string { + return 
fmt.Sprintf("%s.%s.%s/%s[%s]", resourceReference.Resource, resourceReference.Version, resourceReference.Group, resourceReference.Name, resourceReference.Namespace) +} + +func unstructuredToMustGatherFormat(in []*Resource) ([]*Resource, error) { + type mustGatherKeyType struct { + gk schema.GroupKind + namespace string + } + + versionsByGroupKind := map[schema.GroupKind]sets.Set[string]{} + groupKindToResource := map[schema.GroupKind]schema.GroupVersionResource{} + byGroupKind := map[mustGatherKeyType]*unstructured.UnstructuredList{} + for _, curr := range in { + gvk := curr.Content.GroupVersionKind() + groupKind := curr.Content.GroupVersionKind().GroupKind() + existingVersions, ok := versionsByGroupKind[groupKind] + if !ok { + existingVersions = sets.New[string]() + versionsByGroupKind[groupKind] = existingVersions + } + existingVersions.Insert(gvk.Version) + groupKindToResource[groupKind] = curr.ResourceType + + mustGatherKey := mustGatherKeyType{ + gk: groupKind, + namespace: curr.Content.GetNamespace(), + } + existing, ok := byGroupKind[mustGatherKey] + if !ok { + existing = &unstructured.UnstructuredList{ + Object: map[string]interface{}{}, + } + listGVK := guessListKind(curr.Content) + existing.GetObjectKind().SetGroupVersionKind(listGVK) + byGroupKind[mustGatherKey] = existing + } + existing.Items = append(existing.Items, *curr.Content.DeepCopy()) + } + + errs := []error{} + for groupKind, currVersions := range versionsByGroupKind { + if len(currVersions) == 1 { + continue + } + errs = append(errs, fmt.Errorf("groupKind=%v has multiple versions: %v, which prevents serialization", groupKind, sets.List(currVersions))) + } + if len(errs) > 0 { + return nil, errors.Join(errs...) 
+ } + + ret := []*Resource{} + for mustGatherKey, list := range byGroupKind { + namespacedString := "REPLACE_ME" + if len(mustGatherKey.namespace) > 0 { + namespacedString = "namespaces" + } else { + namespacedString = "cluster-scoped-resources" + } + + groupString := mustGatherKey.gk.Group + if len(groupString) == 0 { + groupString = "core" + } + listAsUnstructured := &unstructured.Unstructured{Object: list.UnstructuredContent()} + resourceType := groupKindToResource[mustGatherKey.gk] + ret = append(ret, &Resource{ + Filename: path.Join(namespacedString, mustGatherKey.namespace, groupString, fmt.Sprintf("%s.yaml", resourceType.Resource)), + Content: listAsUnstructured, + ResourceType: resourceType, + }) + } + + return ret, nil +} + +func guessListKind(in *unstructured.Unstructured) schema.GroupVersionKind { + return schema.GroupVersionKind{ + Group: in.GroupVersionKind().Group, + Version: in.GroupVersionKind().Version, + Kind: in.GroupVersionKind().Kind + "List", + } +} diff --git a/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryinputresources/resource.go b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryinputresources/resource.go new file mode 100644 index 000000000..67785935a --- /dev/null +++ b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryinputresources/resource.go @@ -0,0 +1,273 @@ +package libraryinputresources + +import ( + "errors" + "fmt" + "io/fs" + "os" + "path" + "path/filepath" + "reflect" + "strings" + + "sigs.k8s.io/yaml" + + "github.com/google/go-cmp/cmp" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" +) + +// TODO this is a good target to move to library-go so we all agree how to reference these. 
+type Resource struct { + Filename string + ResourceType schema.GroupVersionResource + Content *unstructured.Unstructured +} + +func (r Resource) ID() string { + name := r.Content.GetName() + namespace := r.Content.GetNamespace() + if namespace == "" { + namespace = "_cluster_scoped_resource_" + } + return fmt.Sprintf("%s/%s/%s/%s", r.ResourceType.Group, r.ResourceType.Resource, namespace, name) +} + +func discoverResourcesFromMustGather(mustGatherDir string) (map[schema.GroupVersionKind][]schema.GroupVersionResource, error) { + discoveryClient, err := NewDiscoveryClientFromMustGather(mustGatherDir) + if err != nil { + return nil, fmt.Errorf("failed creating discovery client: %w", err) + } + + _, gvToAPIResourceList, _, err := discoveryClient.GroupsAndMaybeResources() + if err != nil { + return nil, fmt.Errorf("failed to get api resource list with GroupsAndMaybeResources: %w", err) + } + + gvkToResources := map[schema.GroupVersionKind][]schema.GroupVersionResource{} + for gv, apiResourceList := range gvToAPIResourceList { + for _, apiResource := range apiResourceList.APIResources { + if strings.Contains(apiResource.Name, "/") { + // Skip subresources + continue + } + gvk := schema.GroupVersionKind{ + Group: gv.Group, + Version: gv.Version, + Kind: apiResource.Kind, + } + gvkToResources[gvk] = append(gvkToResources[gvk], schema.GroupVersionResource{ + Group: gv.Group, + Version: gv.Version, + Resource: apiResource.Name, + }) + } + } + return gvkToResources, nil +} + +func LenientResourcesFromDirRecursive(location string) ([]*Resource, error) { + gvkToResources, err := discoverResourcesFromMustGather(location) + if err != nil { + return nil, fmt.Errorf("failed to discover resources from must-gather: %w", err) + } + + currResourceList := []*Resource{} + errs := []error{} + err = filepath.WalkDir(location, func(currLocation string, currFile fs.DirEntry, err error) error { + if err != nil { + errs = append(errs, err) + } + + if currFile.IsDir() { + return nil + } + if 
!strings.HasSuffix(currFile.Name(), ".yaml") && !strings.HasSuffix(currFile.Name(), ".json") { + return nil + } + currResource, err := ResourcesFromFile(gvkToResources, currLocation, location) + if err != nil { + return fmt.Errorf("error deserializing %q: %w", currLocation, err) + } + currResourceList = append(currResourceList, currResource...) + + return nil + }) + if err != nil { + errs = append(errs, err) + } + + return currResourceList, errors.Join(errs...) +} + +func findGVR(gvkToResources map[schema.GroupVersionKind][]schema.GroupVersionResource, gvk schema.GroupVersionKind) (*schema.GroupVersionResource, error) { + resources := gvkToResources[gvk] + switch len(resources) { + case 1: + return &resources[0], nil + case 0: + return nil, fmt.Errorf("no resources found for Group: %q, Version: %q, Kind: %q", gvk.Group, gvk.Version, gvk.Kind) + default: + return nil, fmt.Errorf("multiple resources found for Group: %q, Version: %q, Kind: %q", gvk.Group, gvk.Version, gvk.Kind) + } +} + +func ResourcesFromFile(gvkToResources map[schema.GroupVersionKind][]schema.GroupVersionResource, location, fileTrimPrefix string) ([]*Resource, error) { + content, err := os.ReadFile(location) + if err != nil { + return nil, fmt.Errorf("unable to read %q: %w", location, err) + } + + ret, _, jsonErr := unstructured.UnstructuredJSONScheme.Decode(content, nil, &unstructured.Unstructured{}) + if jsonErr != nil { + // try to see if it's yaml + jsonString, err := yaml.YAMLToJSON(content) + if err != nil { + return nil, fmt.Errorf("unable to decode %q as json: %w", location, jsonErr) + } + ret, _, err = unstructured.UnstructuredJSONScheme.Decode(jsonString, nil, &unstructured.Unstructured{}) + if err != nil { + return nil, fmt.Errorf("unable to decode %q as yaml: %w", location, err) + } + } + + retFilename := strings.TrimPrefix(location, fileTrimPrefix) + retFilename = strings.TrimPrefix(retFilename, "/") + retContent := ret.(*unstructured.Unstructured) + + resource := &Resource{ + Filename: 
retFilename, + Content: retContent, + } + + // Short-circuit if the file contains a single resource + if !resource.Content.IsList() { + gvk := retContent.GroupVersionKind() + gvr, err := findGVR(gvkToResources, gvk) + if err != nil { + return nil, fmt.Errorf("failed to find gvr: %w", err) + } + resource.ResourceType = *gvr + return []*Resource{resource}, nil + } + + // Unpack if the file contains a list of resources + list, err := resource.Content.ToList() + if err != nil { + return nil, fmt.Errorf("unable to convert resource content to list: %w", err) + } + + resources := make([]*Resource, 0, len(list.Items)) + for _, item := range list.Items { + gvr, err := findGVR(gvkToResources, item.GroupVersionKind()) + if err != nil { + return nil, fmt.Errorf("failed to find gvr: %w", err) + } + resources = append(resources, &Resource{ + Filename: resource.Filename, + Content: &item, + ResourceType: *gvr, + }) + } + + return resources, nil +} + +func IdentifyResource(in *Resource) string { + gvkString := fmt.Sprintf("%s.%s.%s/%s[%s]", in.Content.GroupVersionKind().Kind, in.Content.GroupVersionKind().Version, in.Content.GroupVersionKind().Group, in.Content.GetName(), in.Content.GetNamespace()) + + return fmt.Sprintf("%s(%s)", gvkString, in.Filename) +} + +func WriteResource(in *Resource, parentDir string) error { + if len(in.Filename) == 0 { + return fmt.Errorf("%s is missing filename", IdentifyResource(in)) + } + + dir := path.Join(parentDir, path.Dir(in.Filename)) + if err := os.MkdirAll(dir, 0755); err != nil { + return fmt.Errorf("error creating dir for %v: %w", IdentifyResource(in), err) + } + + file := path.Join(parentDir, in.Filename) + resourceYaml, err := yaml.Marshal(in.Content) + if err != nil { + return fmt.Errorf("error serializing %v: %w", IdentifyResource(in), err) + } + if err := os.WriteFile(file, resourceYaml, 0644); err != nil { + return fmt.Errorf("error writing %v: %w", IdentifyResource(in), err) + } + + return nil +} + +func EquivalentResources(field 
string, lhses, rhses []*Resource) []string { + reasons := []string{} + + for i := range lhses { + lhs := lhses[i] + rhs := findResource(rhses, lhs.Filename) + + if rhs == nil { + reasons = append(reasons, fmt.Sprintf("%v[%d]: %q missing in rhs", field, i, lhs.Filename)) + continue + } + if !reflect.DeepEqual(lhs.Content, rhs.Content) { + reasons = append(reasons, fmt.Sprintf("%v[%d]: does not match: %v", field, i, cmp.Diff(lhs.Content, rhs.Content))) + } + } + + for i := range rhses { + rhs := rhses[i] + lhs := findResource(lhses, rhs.Filename) + + if lhs == nil { + reasons = append(reasons, fmt.Sprintf("%v[%d]: %q missing in lhs", field, i, rhs.Filename)) + continue + } + } + + return reasons +} + +func findResource(in []*Resource, filename string) *Resource { + for _, curr := range in { + if curr.Filename == filename { + return curr + } + } + + return nil +} + +func NewUniqueResourceSet(resources ...*Resource) *UniqueResourceSet { + u := &UniqueResourceSet{ + seen: sets.New[string](), + resources: []*Resource{}, + } + u.Insert(resources...) 
+	return u
+}
+
+type UniqueResourceSet struct {
+	seen      sets.Set[string]
+	resources []*Resource
+}
+
+func (u *UniqueResourceSet) Insert(resources ...*Resource) {
+	for _, resource := range resources {
+		if resource == nil {
+			continue
+		}
+		if u.seen.Has(resource.ID()) {
+			continue
+		}
+		u.resources = append(u.resources, resource)
+		u.seen.Insert(resource.ID())
+	}
+}
+
+func (u *UniqueResourceSet) List() []*Resource {
+	return u.resources
+}
diff --git a/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryinputresources/types.go b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryinputresources/types.go
new file mode 100644
index 000000000..c9ce14e26
--- /dev/null
+++ b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryinputresources/types.go
@@ -0,0 +1,110 @@
+package libraryinputresources
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// InputResources contains the items that an operator needs to make a decision about what needs to be created,
+// modified, or removed.
+type InputResources struct {
+	// applyConfigurationResources is the list of resources used as input to the apply-configuration command.
+	// It is the responsibility of the MOM to determine where the inputs come from.
+ ApplyConfigurationResources ResourceList `json:"applyConfigurationResources,omitempty"` + + // operandResources is the list of resources that are important for determining check-health + OperandResources OperandResourceList `json:"operandResources,omitempty"` +} + +type ResourceList struct { + ExactResources []ExactResourceID `json:"exactResources,omitempty"` + + GeneratedNameResources []GeneratedResourceID `json:"generatedNameResources,omitempty"` + + LabelSelectedResources []LabelSelectedResource `json:"labelSelectedResources,omitempty"` + + // use resourceReferences when one resource (apiserver.config.openshift.io/cluster) refers to another resource + // like a secret (.spec.servingCerts.namedCertificates[*].servingCertificates.name). + ResourceReferences []ResourceReference `json:"resourceReferences,omitempty"` +} + +type LabelSelectedResource struct { + InputResourceTypeIdentifier `json:",inline"` + + Namespace string `json:"namespace,omitempty"` + + // validation prevents setting matchExpressions + LabelSelector metav1.LabelSelector `json:"labelSelector"` +} + +type OperandResourceList struct { + ConfigurationResources ResourceList `json:"configurationResources,omitempty"` + ManagementResources ResourceList `json:"managementResources,omitempty"` + UserWorkloadResources ResourceList `json:"userWorkloadResources,omitempty"` +} + +type ExactResourceID struct { + InputResourceTypeIdentifier `json:",inline"` + + Namespace string `json:"namespace,omitempty"` + Name string `json:"name"` +} + +type GeneratedResourceID struct { + InputResourceTypeIdentifier `json:",inline"` + + Namespace string `json:"namespace,omitempty"` + GeneratedName string `json:"name"` +} + +type ResourceReference struct { + // TODO determine if we need the ability to select multiple containing resources. I don’t think we’ll need to given the shape of our configuration. 
+ ReferringResource ExactResourceID `json:"referringResource"` + + Type ResourceReferenceType `json:"type"` + + ExplicitNamespacedReference *ExplicitNamespacedReference `json:"explicitNamespacedReference,omitempty"` + ImplicitNamespacedReference *ImplicitNamespacedReference `json:"implicitNamespacedReference,omitempty"` + ClusterScopedReference *ClusterScopedReference `json:"clusterScopedReference,omitempty"` +} + +type ResourceReferenceType string + +const ( + ExplicitNamespacedReferenceType ResourceReferenceType = "ExplicitNamespacedReference" + ImplicitNamespacedReferenceType ResourceReferenceType = "ImplicitNamespacedReference" + ClusterScopedReferenceType ResourceReferenceType = "ClusterScopedReference" +) + +type ExplicitNamespacedReference struct { + InputResourceTypeIdentifier `json:",inline"` + + // may have multiple matches + // TODO CEL may be more appropriate + NamespaceJSONPath string `json:"namespaceJSONPath"` + NameJSONPath string `json:"nameJSONPath"` +} + +type ImplicitNamespacedReference struct { + InputResourceTypeIdentifier `json:",inline"` + + Namespace string `json:"namespace"` + // may have multiple matches + // TODO CEL may be more appropriate + NameJSONPath string `json:"nameJSONPath"` +} + +type ClusterScopedReference struct { + InputResourceTypeIdentifier `json:",inline"` + + // may have multiple matches + // TODO CEL may be more appropriate + NameJSONPath string `json:"nameJSONPath"` +} + +type InputResourceTypeIdentifier struct { + Group string `json:"group"` + // version is very important because it must match the version of serialization that your operator expects. + // All Group,Resource tuples must use the same Version. 
+ Version string `json:"version"` + Resource string `json:"resource"` +} diff --git a/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryinputresources/validation.go b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryinputresources/validation.go new file mode 100644 index 000000000..7697afcfd --- /dev/null +++ b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryinputresources/validation.go @@ -0,0 +1,103 @@ +package libraryinputresources + +import "k8s.io/apimachinery/pkg/util/validation/field" + +func validateInputResources(obj *InputResources) []error { + errs := []error{} + + errs = append(errs, validateResourceList(field.NewPath("applyConfigurationResources"), obj.ApplyConfigurationResources)...) + errs = append(errs, validateOperandResourceList(field.NewPath("operandResources"), obj.OperandResources)...) + + return errs +} + +func validateOperandResourceList(path *field.Path, obj OperandResourceList) []error { + errs := []error{} + + errs = append(errs, validateResourceList(path.Child("configurationResources"), obj.ConfigurationResources)...) + errs = append(errs, validateResourceList(path.Child("managementResources"), obj.ManagementResources)...) + errs = append(errs, validateResourceList(path.Child("userWorkloadResources"), obj.UserWorkloadResources)...) + + return errs +} + +func validateResourceList(path *field.Path, obj ResourceList) []error { + errs := []error{} + + for i, curr := range obj.ExactResources { + errs = append(errs, validateExactResourceID(path.Child("exactResources").Index(i), curr)...) + } + for i, curr := range obj.LabelSelectedResources { + errs = append(errs, validateLabelSelectedResources(path.Child("labelSelectedResources").Index(i), curr)...) + } + for i, curr := range obj.ResourceReferences { + errs = append(errs, validateResourceReference(path.Child("resourceReferences").Index(i), curr)...) 
+ } + + return errs +} + +func validateExactResourceID(path *field.Path, obj ExactResourceID) []error { + errs := []error{} + + errs = append(errs, validateInputResourceTypeIdentifier(path, obj.InputResourceTypeIdentifier)...) + if len(obj.Name) == 0 { + errs = append(errs, field.Required(path.Child("name"), "must be present")) + } + + return errs +} + +func validateLabelSelectedResources(path *field.Path, obj LabelSelectedResource) []error { + errs := []error{} + + errs = append(errs, validateInputResourceTypeIdentifier(path, obj.InputResourceTypeIdentifier)...) + if len(obj.LabelSelector.MatchExpressions) > 0 { + errs = append(errs, field.Forbidden(path.Child("matchExpressions"), "not supported")) + } + return errs +} + +func validateInputResourceTypeIdentifier(path *field.Path, obj InputResourceTypeIdentifier) []error { + errs := []error{} + + if len(obj.Version) == 0 { + errs = append(errs, field.Required(path.Child("version"), "must be present")) + } + if len(obj.Resource) == 0 { + errs = append(errs, field.Required(path.Child("resource"), "must be present")) + } + + return errs +} + +func validateResourceReference(path *field.Path, obj ResourceReference) []error { + errs := []error{} + + errs = append(errs, validateExactResourceID(path.Child("referringResource"), obj.ReferringResource)...) + + switch obj.Type { + case ImplicitNamespacedReferenceType: + errs = append(errs, validateImplicitNamespaceReference(path.Child("implicitNamespacedReference"), obj.ImplicitNamespacedReference)...) + default: + errs = append(errs, field.NotSupported(path.Child("type"), obj.Type, []ResourceReferenceType{ImplicitNamespacedReferenceType})) + } + + return errs +} + +func validateImplicitNamespaceReference(path *field.Path, obj *ImplicitNamespacedReference) []error { + errs := []error{} + + errs = append(errs, validateInputResourceTypeIdentifier(path, obj.InputResourceTypeIdentifier)...) 
+ if len(obj.Namespace) == 0 { + errs = append(errs, field.Required(path.Child("namespace"), "must be present")) + } + + _, err := builder.NewEvaluable(obj.NameJSONPath) + if err != nil { + errs = append(errs, field.Invalid(path.Child("nameJSONPath"), obj.NameJSONPath, err.Error())) + } + + return errs +} diff --git a/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryoutputresources/command.go b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryoutputresources/command.go new file mode 100644 index 000000000..f53f7588d --- /dev/null +++ b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryoutputresources/command.go @@ -0,0 +1,74 @@ +package libraryoutputresources + +import ( + "context" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "k8s.io/cli-runtime/pkg/genericiooptions" +) + +type OutputResourcesFunc func(ctx context.Context) (*OutputResources, error) + +func NewOutputResourcesCommand(outputResourcesFn OutputResourcesFunc, streams genericiooptions.IOStreams) *cobra.Command { + return newOutputResourcesCommand(outputResourcesFn, streams) +} + +type outputResourcesFlags struct { + outputResources OutputResourcesFunc + + streams genericiooptions.IOStreams +} + +func newOutputResourcesFlags(streams genericiooptions.IOStreams) *outputResourcesFlags { + return &outputResourcesFlags{ + streams: streams, + } +} + +func newOutputResourcesCommand(outputResources OutputResourcesFunc, streams genericiooptions.IOStreams) *cobra.Command { + f := newOutputResourcesFlags(streams) + f.outputResources = outputResources + + cmd := &cobra.Command{ + Use: "output-resources", + Short: "List of resources that this operator outputs and the type of cluster those modifications should be applied to.", + + SilenceUsage: true, + SilenceErrors: true, + RunE: func(cmd *cobra.Command, args []string) error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + if err := f.Validate(); err != nil { + 
return err + } + o, err := f.ToOptions(ctx) + if err != nil { + return err + } + if err := o.Run(ctx); err != nil { + return err + } + return nil + }, + } + + f.BindFlags(cmd.Flags()) + + return cmd +} + +func (f *outputResourcesFlags) BindFlags(flags *pflag.FlagSet) { +} + +func (f *outputResourcesFlags) Validate() error { + return nil +} + +func (f *outputResourcesFlags) ToOptions(ctx context.Context) (*outputResourcesOptions, error) { + return newOutputResourcesOptions( + f.outputResources, + f.streams, + ), + nil +} diff --git a/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryoutputresources/easy_creation.go b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryoutputresources/easy_creation.go new file mode 100644 index 000000000..503700ff7 --- /dev/null +++ b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryoutputresources/easy_creation.go @@ -0,0 +1,93 @@ +package libraryoutputresources + +func ExactResource(group, version, resource, namespace, name string) ExactResourceID { + return ExactResourceID{ + OutputResourceTypeIdentifier: OutputResourceTypeIdentifier{ + Group: group, + Version: version, + Resource: resource, + }, + Namespace: namespace, + Name: name, + } +} + +func GeneratedResource(group, version, resource, namespace, name string) GeneratedResourceID { + return GeneratedResourceID{ + OutputResourceTypeIdentifier: OutputResourceTypeIdentifier{ + Group: group, + Version: version, + Resource: resource, + }, + Namespace: namespace, + GeneratedName: name, + } +} + +func ExactSecret(namespace, name string) ExactResourceID { + return ExactResource("", "v1", "secrets", namespace, name) +} + +func ExactConfigMap(namespace, name string) ExactResourceID { + return ExactResource("", "v1", "configmaps", namespace, name) +} + +func ExactNamespace(name string) ExactResourceID { + return ExactResource("", "v1", "namespaces", "", name) +} + +func ExactServiceAccount(namespace, name string) ExactResourceID 
{ + return ExactResource("", "v1", "serviceaccounts", namespace, name) +} + +func ExactDeployment(namespace, name string) ExactResourceID { + return ExactResource("apps", "v1", "deployments", namespace, name) +} + +func ExactDaemonSet(namespace, name string) ExactResourceID { + return ExactResource("apps", "v1", "daemonsets", namespace, name) +} + +func ExactClusterOperator(name string) ExactResourceID { + return ExactResource("config.openshift.io", "v1", "clusteroperators", "", name) +} + +func ExactLowLevelOperator(resource string) ExactResourceID { + return ExactResource("operator.openshift.io", "v1", resource, "", "cluster") +} + +func ExactClusterRole(name string) ExactResourceID { + return ExactResource("rbac.authorization.k8s.io", "v1", "clusterroles", "", name) +} + +func ExactClusterRoleBinding(name string) ExactResourceID { + return ExactResource("rbac.authorization.k8s.io", "v1", "clusterrolebindings", "", name) +} + +func ExactRole(namespace, name string) ExactResourceID { + return ExactResource("rbac.authorization.k8s.io", "v1", "roles", namespace, name) +} + +func ExactRoleBinding(namespace, name string) ExactResourceID { + return ExactResource("rbac.authorization.k8s.io", "v1", "rolebindings", namespace, name) +} + +func ExactConfigResource(resource string) ExactResourceID { + return ExactResource("config.openshift.io", "v1", resource, "", "cluster") +} + +func GeneratedCSR(generateName string) GeneratedResourceID { + return GeneratedResource("certificates.k8s.io", "v1", "certificatesigningrequests", "", generateName) +} + +func ExactPDB(namespace, name string) ExactResourceID { + return ExactResource("policy", "v1", "poddisruptionbudgets", namespace, name) +} + +func ExactService(namespace, name string) ExactResourceID { + return ExactResource("", "v1", "services", namespace, name) +} + +func ExactOAuthClient(name string) ExactResourceID { + return ExactResource("oauth.openshift.io", "v1", "oauthclients", "", name) +} diff --git 
a/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryoutputresources/options.go b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryoutputresources/options.go new file mode 100644 index 000000000..82ccb6107 --- /dev/null +++ b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryoutputresources/options.go @@ -0,0 +1,40 @@ +package libraryoutputresources + +import ( + "context" + "fmt" + + "k8s.io/cli-runtime/pkg/genericiooptions" + "sigs.k8s.io/yaml" +) + +type outputResourcesOptions struct { + outputResourcesFn OutputResourcesFunc + + streams genericiooptions.IOStreams +} + +func newOutputResourcesOptions(outputResourcesFn OutputResourcesFunc, streams genericiooptions.IOStreams) *outputResourcesOptions { + return &outputResourcesOptions{ + outputResourcesFn: outputResourcesFn, + streams: streams, + } +} + +func (o *outputResourcesOptions) Run(ctx context.Context) error { + result, err := o.outputResourcesFn(ctx) + if err != nil { + return err + } + + outputResourcesYAML, err := yaml.Marshal(result) + if err != nil { + return err + } + + if _, err := fmt.Fprint(o.streams.Out, string(outputResourcesYAML)); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryoutputresources/types.go b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryoutputresources/types.go new file mode 100644 index 000000000..54a3c0442 --- /dev/null +++ b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryoutputresources/types.go @@ -0,0 +1,59 @@ +package libraryoutputresources + +// OutputResources is a list of resources that an operator will need to mutate from apply-configuration and apply-configuration-live. +// This needs to be a complete list. Any resource not present in this list will not be mutable for this operator. 
+type OutputResources struct {
+	// configurationResources are targeted at the cluster where configuration is held.
+	// On standalone, this is the one cluster.
+	// On HCP, this is logically a view into the resources in the namespace of the guest cluster.
+	ConfigurationResources ResourceList `json:"configurationResources,omitempty"`
+	// managementResources are targeted at the cluster where management plane responsibility is held.
+	// On standalone, this is the one cluster.
+	// On HCP, this is logically resources in the namespace of the guest cluster: usually the control plane aspects.
+	ManagementResources ResourceList `json:"managementResources,omitempty"`
+	// UserWorkloadResources are targeted at the cluster where user workloads run.
+	// On standalone, this is the one cluster.
+	// On HCP, this is the guest cluster.
+	UserWorkloadResources ResourceList `json:"userWorkloadResources,omitempty"`
+}
+
+type ResourceList struct {
+	// exactResources are lists of exact names that are mutated
+	ExactResources []ExactResourceID `json:"exactResources,omitempty"`
+
+	// generatedNameResources are lists of generatedNames that are mutated.
+	// These are also honored on non-creates, via prefix matching, but *only* on resources with generatedNames.
+	// This is not a cheat code for prefix matching.
+	GeneratedNameResources []GeneratedResourceID `json:"generatedNameResources,omitempty"`
+
+	// eventingNamespaces holds a list of namespaces that the operator can output events into.
+	// This allows redirection of events to a particular cluster on a per-namespace level.
+	// For instance, the openshift-authentication-operator can go to management, but openshift-authentication can go
+	// to the userWorkload cluster.
+	EventingNamespaces []string `json:"eventingNamespaces,omitempty"`
+
+	// TODO I bet this covers 95% of what we need, but maybe we need label selector.
+	// I'm a solid -1 on "pattern" based selection. We select in kube based on label selectors.
+} + +type ExactResourceID struct { + OutputResourceTypeIdentifier `json:",inline"` + + Namespace string `json:"namespace,omitempty"` + Name string `json:"name"` +} + +type GeneratedResourceID struct { + OutputResourceTypeIdentifier `json:",inline"` + + Namespace string `json:"namespace,omitempty"` + GeneratedName string `json:"name"` +} + +// OutputResourceTypeIdentifier does *not* include version, because the serialization doesn't matter for production. +// We'll be able to read the file and see how it is serialized. +type OutputResourceTypeIdentifier struct { + Group string `json:"group"` + Version string `json:"version"` + Resource string `json:"resource"` +} diff --git a/vendor/github.com/shopspring/decimal/.gitignore b/vendor/github.com/shopspring/decimal/.gitignore new file mode 100644 index 000000000..ff36b987f --- /dev/null +++ b/vendor/github.com/shopspring/decimal/.gitignore @@ -0,0 +1,9 @@ +.git +*.swp + +# IntelliJ +.idea/ +*.iml + +# VS code +*.code-workspace diff --git a/vendor/github.com/shopspring/decimal/.travis.yml b/vendor/github.com/shopspring/decimal/.travis.yml new file mode 100644 index 000000000..6326d40f0 --- /dev/null +++ b/vendor/github.com/shopspring/decimal/.travis.yml @@ -0,0 +1,19 @@ +language: go + +arch: + - amd64 + - ppc64le + +go: + - 1.7.x + - 1.14.x + - 1.15.x + - 1.16.x + - 1.17.x + - tip + +install: + - go build . 
+ +script: + - go test -v diff --git a/vendor/github.com/shopspring/decimal/CHANGELOG.md b/vendor/github.com/shopspring/decimal/CHANGELOG.md new file mode 100644 index 000000000..aea61154b --- /dev/null +++ b/vendor/github.com/shopspring/decimal/CHANGELOG.md @@ -0,0 +1,49 @@ +## Decimal v1.3.1 + +#### ENHANCEMENTS +- Reduce memory allocation in case of initialization from big.Int [#252](https://github.com/shopspring/decimal/pull/252) + +#### BUGFIXES +- Fix binary marshalling of decimal zero value [#253](https://github.com/shopspring/decimal/pull/253) + +## Decimal v1.3.0 + +#### FEATURES +- Add NewFromFormattedString initializer [#184](https://github.com/shopspring/decimal/pull/184) +- Add NewNullDecimal initializer [#234](https://github.com/shopspring/decimal/pull/234) +- Add implementation of natural exponent function (Taylor, Hull-Abraham) [#229](https://github.com/shopspring/decimal/pull/229) +- Add RoundUp, RoundDown, RoundCeil, RoundFloor methods [#196](https://github.com/shopspring/decimal/pull/196) [#202](https://github.com/shopspring/decimal/pull/202) [#220](https://github.com/shopspring/decimal/pull/220) +- Add XML support for NullDecimal [#192](https://github.com/shopspring/decimal/pull/192) +- Add IsInteger method [#179](https://github.com/shopspring/decimal/pull/179) +- Add Copy helper method [#123](https://github.com/shopspring/decimal/pull/123) +- Add InexactFloat64 helper method [#205](https://github.com/shopspring/decimal/pull/205) +- Add CoefficientInt64 helper method [#244](https://github.com/shopspring/decimal/pull/244) + +#### ENHANCEMENTS +- Performance optimization of NewFromString init method [#198](https://github.com/shopspring/decimal/pull/198) +- Performance optimization of Abs and Round methods [#240](https://github.com/shopspring/decimal/pull/240) +- Additional tests (CI) for ppc64le architecture [#188](https://github.com/shopspring/decimal/pull/188) + +#### BUGFIXES +- Fix rounding in FormatFloat fallback path (roundShortest method, 
fix taken from Go main repository) [#161](https://github.com/shopspring/decimal/pull/161) +- Add slice range checks to UnmarshalBinary method [#232](https://github.com/shopspring/decimal/pull/232) + +## Decimal v1.2.0 + +#### BREAKING +- Drop support for Go version older than 1.7 [#172](https://github.com/shopspring/decimal/pull/172) + +#### FEATURES +- Add NewFromInt and NewFromInt32 initializers [#72](https://github.com/shopspring/decimal/pull/72) +- Add support for Go modules [#157](https://github.com/shopspring/decimal/pull/157) +- Add BigInt, BigFloat helper methods [#171](https://github.com/shopspring/decimal/pull/171) + +#### ENHANCEMENTS +- Memory usage optimization [#160](https://github.com/shopspring/decimal/pull/160) +- Updated travis CI golang versions [#156](https://github.com/shopspring/decimal/pull/156) +- Update documentation [#173](https://github.com/shopspring/decimal/pull/173) +- Improve code quality [#174](https://github.com/shopspring/decimal/pull/174) + +#### BUGFIXES +- Revert remove insignificant digits [#159](https://github.com/shopspring/decimal/pull/159) +- Remove 15 interval for RoundCash [#166](https://github.com/shopspring/decimal/pull/166) diff --git a/vendor/github.com/shopspring/decimal/LICENSE b/vendor/github.com/shopspring/decimal/LICENSE new file mode 100644 index 000000000..ad2148aaf --- /dev/null +++ b/vendor/github.com/shopspring/decimal/LICENSE @@ -0,0 +1,45 @@ +The MIT License (MIT) + +Copyright (c) 2015 Spring, Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + +- Based on https://github.com/oguzbilgic/fpd, which has the following license: +""" +The MIT License (MIT) + +Copyright (c) 2013 Oguz Bilgic + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +""" diff --git a/vendor/github.com/shopspring/decimal/README.md b/vendor/github.com/shopspring/decimal/README.md new file mode 100644 index 000000000..2e35df068 --- /dev/null +++ b/vendor/github.com/shopspring/decimal/README.md @@ -0,0 +1,130 @@ +# decimal + +[![Build Status](https://app.travis-ci.com/shopspring/decimal.svg?branch=master)](https://app.travis-ci.com/shopspring/decimal) [![GoDoc](https://godoc.org/github.com/shopspring/decimal?status.svg)](https://godoc.org/github.com/shopspring/decimal) [![Go Report Card](https://goreportcard.com/badge/github.com/shopspring/decimal)](https://goreportcard.com/report/github.com/shopspring/decimal) + +Arbitrary-precision fixed-point decimal numbers in go. + +_Note:_ Decimal library can "only" represent numbers with a maximum of 2^31 digits after the decimal point. 
+ +## Features + + * The zero-value is 0, and is safe to use without initialization + * Addition, subtraction, multiplication with no loss of precision + * Division with specified precision + * Database/sql serialization/deserialization + * JSON and XML serialization/deserialization + +## Install + +Run `go get github.com/shopspring/decimal` + +## Requirements + +Decimal library requires Go version `>=1.7` + +## Usage + +```go +package main + +import ( + "fmt" + "github.com/shopspring/decimal" +) + +func main() { + price, err := decimal.NewFromString("136.02") + if err != nil { + panic(err) + } + + quantity := decimal.NewFromInt(3) + + fee, _ := decimal.NewFromString(".035") + taxRate, _ := decimal.NewFromString(".08875") + + subtotal := price.Mul(quantity) + + preTax := subtotal.Mul(fee.Add(decimal.NewFromFloat(1))) + + total := preTax.Mul(taxRate.Add(decimal.NewFromFloat(1))) + + fmt.Println("Subtotal:", subtotal) // Subtotal: 408.06 + fmt.Println("Pre-tax:", preTax) // Pre-tax: 422.3421 + fmt.Println("Taxes:", total.Sub(preTax)) // Taxes: 37.482861375 + fmt.Println("Total:", total) // Total: 459.824961375 + fmt.Println("Tax rate:", total.Sub(preTax).Div(preTax)) // Tax rate: 0.08875 +} +``` + +## Documentation + +http://godoc.org/github.com/shopspring/decimal + +## Production Usage + +* [Spring](https://shopspring.com/), since August 14, 2014. +* If you are using this in production, please let us know! + +## FAQ + +#### Why don't you just use float64? + +Because float64 (or any binary floating point type, actually) can't represent +numbers such as `0.1` exactly. + +Consider this code: http://play.golang.org/p/TQBd4yJe6B You might expect that +it prints out `10`, but it actually prints `9.999999999999831`. Over time, +these small errors can really add up! + +#### Why don't you just use big.Rat? + +big.Rat is fine for representing rational numbers, but Decimal is better for +representing money. Why? 
Here's a (contrived) example: + +Let's say you use big.Rat, and you have two numbers, x and y, both +representing 1/3, and you have `z = 1 - x - y = 1/3`. If you print each one +out, the string output has to stop somewhere (let's say it stops at 3 decimal +digits, for simplicity), so you'll get 0.333, 0.333, and 0.333. But where did +the other 0.001 go? + +Here's the above example as code: http://play.golang.org/p/lCZZs0w9KE + +With Decimal, the strings being printed out represent the number exactly. So, +if you have `x = y = 1/3` (with precision 3), they will actually be equal to +0.333, and when you do `z = 1 - x - y`, `z` will be equal to .334. No money is +unaccounted for! + +You still have to be careful. If you want to split a number `N` 3 ways, you +can't just send `N/3` to three different people. You have to pick one to send +`N - (2/3*N)` to. That person will receive the fraction of a penny remainder. + +But, it is much easier to be careful with Decimal than with big.Rat. + +#### Why isn't the API similar to big.Int's? + +big.Int's API is built to reduce the number of memory allocations for maximal +performance. This makes sense for its use-case, but the trade-off is that the +API is awkward and easy to misuse. + +For example, to add two big.Ints, you do: `z := new(big.Int).Add(x, y)`. A +developer unfamiliar with this API might try to do `z := a.Add(a, b)`. This +modifies `a` and sets `z` as an alias for `a`, which they might not expect. It +also modifies any other aliases to `a`. + +Here's an example of the subtle bugs you can introduce with big.Int's API: +https://play.golang.org/p/x2R_78pa8r + +In contrast, it's difficult to make such mistakes with decimal. Decimals +behave like other go numbers types: even though `a = b` will not deep copy +`b` into `a`, it is impossible to modify a Decimal, since all Decimal methods +return new Decimals and do not modify the originals. The downside is that +this causes extra allocations, so Decimal is less performant. 
My assumption +is that if you're using Decimals, you probably care more about correctness +than performance. + +## License + +The MIT License (MIT) + +This is a heavily modified fork of [fpd.Decimal](https://github.com/oguzbilgic/fpd), which was also released under the MIT License. diff --git a/vendor/github.com/shopspring/decimal/decimal-go.go b/vendor/github.com/shopspring/decimal/decimal-go.go new file mode 100644 index 000000000..9958d6902 --- /dev/null +++ b/vendor/github.com/shopspring/decimal/decimal-go.go @@ -0,0 +1,415 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Multiprecision decimal numbers. +// For floating-point formatting only; not general purpose. +// Only operations are assign and (binary) left/right shift. +// Can do binary floating point in multiprecision decimal precisely +// because 2 divides 10; cannot do decimal floating point +// in multiprecision binary precisely. + +package decimal + +type decimal struct { + d [800]byte // digits, big-endian representation + nd int // number of digits used + dp int // decimal point + neg bool // negative flag + trunc bool // discarded nonzero digits beyond d[:nd] +} + +func (a *decimal) String() string { + n := 10 + a.nd + if a.dp > 0 { + n += a.dp + } + if a.dp < 0 { + n += -a.dp + } + + buf := make([]byte, n) + w := 0 + switch { + case a.nd == 0: + return "0" + + case a.dp <= 0: + // zeros fill space between decimal point and digits + buf[w] = '0' + w++ + buf[w] = '.' + w++ + w += digitZero(buf[w : w+-a.dp]) + w += copy(buf[w:], a.d[0:a.nd]) + + case a.dp < a.nd: + // decimal point in middle of digits + w += copy(buf[w:], a.d[0:a.dp]) + buf[w] = '.' 
+ w++ + w += copy(buf[w:], a.d[a.dp:a.nd]) + + default: + // zeros fill space between digits and decimal point + w += copy(buf[w:], a.d[0:a.nd]) + w += digitZero(buf[w : w+a.dp-a.nd]) + } + return string(buf[0:w]) +} + +func digitZero(dst []byte) int { + for i := range dst { + dst[i] = '0' + } + return len(dst) +} + +// trim trailing zeros from number. +// (They are meaningless; the decimal point is tracked +// independent of the number of digits.) +func trim(a *decimal) { + for a.nd > 0 && a.d[a.nd-1] == '0' { + a.nd-- + } + if a.nd == 0 { + a.dp = 0 + } +} + +// Assign v to a. +func (a *decimal) Assign(v uint64) { + var buf [24]byte + + // Write reversed decimal in buf. + n := 0 + for v > 0 { + v1 := v / 10 + v -= 10 * v1 + buf[n] = byte(v + '0') + n++ + v = v1 + } + + // Reverse again to produce forward decimal in a.d. + a.nd = 0 + for n--; n >= 0; n-- { + a.d[a.nd] = buf[n] + a.nd++ + } + a.dp = a.nd + trim(a) +} + +// Maximum shift that we can do in one pass without overflow. +// A uint has 32 or 64 bits, and we have to be able to accommodate 9<> 63) +const maxShift = uintSize - 4 + +// Binary shift right (/ 2) by k bits. k <= maxShift to avoid overflow. +func rightShift(a *decimal, k uint) { + r := 0 // read pointer + w := 0 // write pointer + + // Pick up enough leading digits to cover first shift. + var n uint + for ; n>>k == 0; r++ { + if r >= a.nd { + if n == 0 { + // a == 0; shouldn't get here, but handle anyway. + a.nd = 0 + return + } + for n>>k == 0 { + n = n * 10 + r++ + } + break + } + c := uint(a.d[r]) + n = n*10 + c - '0' + } + a.dp -= r - 1 + + var mask uint = (1 << k) - 1 + + // Pick up a digit, put down a digit. + for ; r < a.nd; r++ { + c := uint(a.d[r]) + dig := n >> k + n &= mask + a.d[w] = byte(dig + '0') + w++ + n = n*10 + c - '0' + } + + // Put down extra digits. 
+ for n > 0 { + dig := n >> k + n &= mask + if w < len(a.d) { + a.d[w] = byte(dig + '0') + w++ + } else if dig > 0 { + a.trunc = true + } + n = n * 10 + } + + a.nd = w + trim(a) +} + +// Cheat sheet for left shift: table indexed by shift count giving +// number of new digits that will be introduced by that shift. +// +// For example, leftcheats[4] = {2, "625"}. That means that +// if we are shifting by 4 (multiplying by 16), it will add 2 digits +// when the string prefix is "625" through "999", and one fewer digit +// if the string prefix is "000" through "624". +// +// Credit for this trick goes to Ken. + +type leftCheat struct { + delta int // number of new digits + cutoff string // minus one digit if original < a. +} + +var leftcheats = []leftCheat{ + // Leading digits of 1/2^i = 5^i. + // 5^23 is not an exact 64-bit floating point number, + // so have to use bc for the math. + // Go up to 60 to be large enough for 32bit and 64bit platforms. + /* + seq 60 | sed 's/^/5^/' | bc | + awk 'BEGIN{ print "\t{ 0, \"\" }," } + { + log2 = log(2)/log(10) + printf("\t{ %d, \"%s\" },\t// * %d\n", + int(log2*NR+1), $0, 2**NR) + }' + */ + {0, ""}, + {1, "5"}, // * 2 + {1, "25"}, // * 4 + {1, "125"}, // * 8 + {2, "625"}, // * 16 + {2, "3125"}, // * 32 + {2, "15625"}, // * 64 + {3, "78125"}, // * 128 + {3, "390625"}, // * 256 + {3, "1953125"}, // * 512 + {4, "9765625"}, // * 1024 + {4, "48828125"}, // * 2048 + {4, "244140625"}, // * 4096 + {4, "1220703125"}, // * 8192 + {5, "6103515625"}, // * 16384 + {5, "30517578125"}, // * 32768 + {5, "152587890625"}, // * 65536 + {6, "762939453125"}, // * 131072 + {6, "3814697265625"}, // * 262144 + {6, "19073486328125"}, // * 524288 + {7, "95367431640625"}, // * 1048576 + {7, "476837158203125"}, // * 2097152 + {7, "2384185791015625"}, // * 4194304 + {7, "11920928955078125"}, // * 8388608 + {8, "59604644775390625"}, // * 16777216 + {8, "298023223876953125"}, // * 33554432 + {8, "1490116119384765625"}, // * 67108864 + {9, 
"7450580596923828125"}, // * 134217728 + {9, "37252902984619140625"}, // * 268435456 + {9, "186264514923095703125"}, // * 536870912 + {10, "931322574615478515625"}, // * 1073741824 + {10, "4656612873077392578125"}, // * 2147483648 + {10, "23283064365386962890625"}, // * 4294967296 + {10, "116415321826934814453125"}, // * 8589934592 + {11, "582076609134674072265625"}, // * 17179869184 + {11, "2910383045673370361328125"}, // * 34359738368 + {11, "14551915228366851806640625"}, // * 68719476736 + {12, "72759576141834259033203125"}, // * 137438953472 + {12, "363797880709171295166015625"}, // * 274877906944 + {12, "1818989403545856475830078125"}, // * 549755813888 + {13, "9094947017729282379150390625"}, // * 1099511627776 + {13, "45474735088646411895751953125"}, // * 2199023255552 + {13, "227373675443232059478759765625"}, // * 4398046511104 + {13, "1136868377216160297393798828125"}, // * 8796093022208 + {14, "5684341886080801486968994140625"}, // * 17592186044416 + {14, "28421709430404007434844970703125"}, // * 35184372088832 + {14, "142108547152020037174224853515625"}, // * 70368744177664 + {15, "710542735760100185871124267578125"}, // * 140737488355328 + {15, "3552713678800500929355621337890625"}, // * 281474976710656 + {15, "17763568394002504646778106689453125"}, // * 562949953421312 + {16, "88817841970012523233890533447265625"}, // * 1125899906842624 + {16, "444089209850062616169452667236328125"}, // * 2251799813685248 + {16, "2220446049250313080847263336181640625"}, // * 4503599627370496 + {16, "11102230246251565404236316680908203125"}, // * 9007199254740992 + {17, "55511151231257827021181583404541015625"}, // * 18014398509481984 + {17, "277555756156289135105907917022705078125"}, // * 36028797018963968 + {17, "1387778780781445675529539585113525390625"}, // * 72057594037927936 + {18, "6938893903907228377647697925567626953125"}, // * 144115188075855872 + {18, "34694469519536141888238489627838134765625"}, // * 288230376151711744 + {18, 
"173472347597680709441192448139190673828125"}, // * 576460752303423488 + {19, "867361737988403547205962240695953369140625"}, // * 1152921504606846976 +} + +// Is the leading prefix of b lexicographically less than s? +func prefixIsLessThan(b []byte, s string) bool { + for i := 0; i < len(s); i++ { + if i >= len(b) { + return true + } + if b[i] != s[i] { + return b[i] < s[i] + } + } + return false +} + +// Binary shift left (* 2) by k bits. k <= maxShift to avoid overflow. +func leftShift(a *decimal, k uint) { + delta := leftcheats[k].delta + if prefixIsLessThan(a.d[0:a.nd], leftcheats[k].cutoff) { + delta-- + } + + r := a.nd // read index + w := a.nd + delta // write index + + // Pick up a digit, put down a digit. + var n uint + for r--; r >= 0; r-- { + n += (uint(a.d[r]) - '0') << k + quo := n / 10 + rem := n - 10*quo + w-- + if w < len(a.d) { + a.d[w] = byte(rem + '0') + } else if rem != 0 { + a.trunc = true + } + n = quo + } + + // Put down extra digits. + for n > 0 { + quo := n / 10 + rem := n - 10*quo + w-- + if w < len(a.d) { + a.d[w] = byte(rem + '0') + } else if rem != 0 { + a.trunc = true + } + n = quo + } + + a.nd += delta + if a.nd >= len(a.d) { + a.nd = len(a.d) + } + a.dp += delta + trim(a) +} + +// Binary shift left (k > 0) or right (k < 0). +func (a *decimal) Shift(k int) { + switch { + case a.nd == 0: + // nothing to do: a == 0 + case k > 0: + for k > maxShift { + leftShift(a, maxShift) + k -= maxShift + } + leftShift(a, uint(k)) + case k < 0: + for k < -maxShift { + rightShift(a, maxShift) + k += maxShift + } + rightShift(a, uint(-k)) + } +} + +// If we chop a at nd digits, should we round up? 
+func shouldRoundUp(a *decimal, nd int) bool { + if nd < 0 || nd >= a.nd { + return false + } + if a.d[nd] == '5' && nd+1 == a.nd { // exactly halfway - round to even + // if we truncated, a little higher than what's recorded - always round up + if a.trunc { + return true + } + return nd > 0 && (a.d[nd-1]-'0')%2 != 0 + } + // not halfway - digit tells all + return a.d[nd] >= '5' +} + +// Round a to nd digits (or fewer). +// If nd is zero, it means we're rounding +// just to the left of the digits, as in +// 0.09 -> 0.1. +func (a *decimal) Round(nd int) { + if nd < 0 || nd >= a.nd { + return + } + if shouldRoundUp(a, nd) { + a.RoundUp(nd) + } else { + a.RoundDown(nd) + } +} + +// Round a down to nd digits (or fewer). +func (a *decimal) RoundDown(nd int) { + if nd < 0 || nd >= a.nd { + return + } + a.nd = nd + trim(a) +} + +// Round a up to nd digits (or fewer). +func (a *decimal) RoundUp(nd int) { + if nd < 0 || nd >= a.nd { + return + } + + // round up + for i := nd - 1; i >= 0; i-- { + c := a.d[i] + if c < '9' { // can stop after this digit + a.d[i]++ + a.nd = i + 1 + return + } + } + + // Number is all 9s. + // Change to single 1 with adjusted decimal point. + a.d[0] = '1' + a.nd = 1 + a.dp++ +} + +// Extract integer part, rounded appropriately. +// No guarantees about overflow. +func (a *decimal) RoundedInteger() uint64 { + if a.dp > 20 { + return 0xFFFFFFFFFFFFFFFF + } + var i int + n := uint64(0) + for i = 0; i < a.dp && i < a.nd; i++ { + n = n*10 + uint64(a.d[i]-'0') + } + for ; i < a.dp; i++ { + n *= 10 + } + if shouldRoundUp(a, a.dp) { + n++ + } + return n +} diff --git a/vendor/github.com/shopspring/decimal/decimal.go b/vendor/github.com/shopspring/decimal/decimal.go new file mode 100644 index 000000000..84405ec1c --- /dev/null +++ b/vendor/github.com/shopspring/decimal/decimal.go @@ -0,0 +1,1904 @@ +// Package decimal implements an arbitrary precision fixed-point decimal. +// +// The zero-value of a Decimal is 0, as you would expect. 
+// +// The best way to create a new Decimal is to use decimal.NewFromString, ex: +// +// n, err := decimal.NewFromString("-123.4567") +// n.String() // output: "-123.4567" +// +// To use Decimal as part of a struct: +// +// type Struct struct { +// Number Decimal +// } +// +// Note: This can "only" represent numbers with a maximum of 2^31 digits after the decimal point. +package decimal + +import ( + "database/sql/driver" + "encoding/binary" + "fmt" + "math" + "math/big" + "regexp" + "strconv" + "strings" +) + +// DivisionPrecision is the number of decimal places in the result when it +// doesn't divide exactly. +// +// Example: +// +// d1 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(3)) +// d1.String() // output: "0.6666666666666667" +// d2 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(30000)) +// d2.String() // output: "0.0000666666666667" +// d3 := decimal.NewFromFloat(20000).Div(decimal.NewFromFloat(3)) +// d3.String() // output: "6666.6666666666666667" +// decimal.DivisionPrecision = 3 +// d4 := decimal.NewFromFloat(2).Div(decimal.NewFromFloat(3)) +// d4.String() // output: "0.667" +// +var DivisionPrecision = 16 + +// MarshalJSONWithoutQuotes should be set to true if you want the decimal to +// be JSON marshaled as a number, instead of as a string. +// WARNING: this is dangerous for decimals with many digits, since many JSON +// unmarshallers (ex: Javascript's) will unmarshal JSON numbers to IEEE 754 +// double-precision floating point numbers, which means you can potentially +// silently lose precision. +var MarshalJSONWithoutQuotes = false + +// ExpMaxIterations specifies the maximum number of iterations needed to calculate +// precise natural exponent value using ExpHullAbrham method. +var ExpMaxIterations = 1000 + +// Zero constant, to make computations faster. +// Zero should never be compared with == or != directly, please use decimal.Equal or decimal.Cmp instead. 
+var Zero = New(0, 1) + +var zeroInt = big.NewInt(0) +var oneInt = big.NewInt(1) +var twoInt = big.NewInt(2) +var fourInt = big.NewInt(4) +var fiveInt = big.NewInt(5) +var tenInt = big.NewInt(10) +var twentyInt = big.NewInt(20) + +var factorials = []Decimal{New(1, 0)} + +// Decimal represents a fixed-point decimal. It is immutable. +// number = value * 10 ^ exp +type Decimal struct { + value *big.Int + + // NOTE(vadim): this must be an int32, because we cast it to float64 during + // calculations. If exp is 64 bit, we might lose precision. + // If we cared about being able to represent every possible decimal, we + // could make exp a *big.Int but it would hurt performance and numbers + // like that are unrealistic. + exp int32 +} + +// New returns a new fixed-point decimal, value * 10 ^ exp. +func New(value int64, exp int32) Decimal { + return Decimal{ + value: big.NewInt(value), + exp: exp, + } +} + +// NewFromInt converts a int64 to Decimal. +// +// Example: +// +// NewFromInt(123).String() // output: "123" +// NewFromInt(-10).String() // output: "-10" +func NewFromInt(value int64) Decimal { + return Decimal{ + value: big.NewInt(value), + exp: 0, + } +} + +// NewFromInt32 converts a int32 to Decimal. +// +// Example: +// +// NewFromInt(123).String() // output: "123" +// NewFromInt(-10).String() // output: "-10" +func NewFromInt32(value int32) Decimal { + return Decimal{ + value: big.NewInt(int64(value)), + exp: 0, + } +} + +// NewFromBigInt returns a new Decimal from a big.Int, value * 10 ^ exp +func NewFromBigInt(value *big.Int, exp int32) Decimal { + return Decimal{ + value: new(big.Int).Set(value), + exp: exp, + } +} + +// NewFromString returns a new Decimal from a string representation. +// Trailing zeroes are not trimmed. 
+// +// Example: +// +// d, err := NewFromString("-123.45") +// d2, err := NewFromString(".0001") +// d3, err := NewFromString("1.47000") +// +func NewFromString(value string) (Decimal, error) { + originalInput := value + var intString string + var exp int64 + + // Check if number is using scientific notation + eIndex := strings.IndexAny(value, "Ee") + if eIndex != -1 { + expInt, err := strconv.ParseInt(value[eIndex+1:], 10, 32) + if err != nil { + if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { + return Decimal{}, fmt.Errorf("can't convert %s to decimal: fractional part too long", value) + } + return Decimal{}, fmt.Errorf("can't convert %s to decimal: exponent is not numeric", value) + } + value = value[:eIndex] + exp = expInt + } + + pIndex := -1 + vLen := len(value) + for i := 0; i < vLen; i++ { + if value[i] == '.' { + if pIndex > -1 { + return Decimal{}, fmt.Errorf("can't convert %s to decimal: too many .s", value) + } + pIndex = i + } + } + + if pIndex == -1 { + // There is no decimal point, we can just parse the original string as + // an int + intString = value + } else { + if pIndex+1 < vLen { + intString = value[:pIndex] + value[pIndex+1:] + } else { + intString = value[:pIndex] + } + expInt := -len(value[pIndex+1:]) + exp += int64(expInt) + } + + var dValue *big.Int + // strconv.ParseInt is faster than new(big.Int).SetString so this is just a shortcut for strings we know won't overflow + if len(intString) <= 18 { + parsed64, err := strconv.ParseInt(intString, 10, 64) + if err != nil { + return Decimal{}, fmt.Errorf("can't convert %s to decimal", value) + } + dValue = big.NewInt(parsed64) + } else { + dValue = new(big.Int) + _, ok := dValue.SetString(intString, 10) + if !ok { + return Decimal{}, fmt.Errorf("can't convert %s to decimal", value) + } + } + + if exp < math.MinInt32 || exp > math.MaxInt32 { + // NOTE(vadim): I doubt a string could realistically be this long + return Decimal{}, fmt.Errorf("can't convert %s to decimal: 
fractional part too long", originalInput) + } + + return Decimal{ + value: dValue, + exp: int32(exp), + }, nil +} + +// NewFromFormattedString returns a new Decimal from a formatted string representation. +// The second argument - replRegexp, is a regular expression that is used to find characters that should be +// removed from given decimal string representation. All matched characters will be replaced with an empty string. +// +// Example: +// +// r := regexp.MustCompile("[$,]") +// d1, err := NewFromFormattedString("$5,125.99", r) +// +// r2 := regexp.MustCompile("[_]") +// d2, err := NewFromFormattedString("1_000_000", r2) +// +// r3 := regexp.MustCompile("[USD\\s]") +// d3, err := NewFromFormattedString("5000 USD", r3) +// +func NewFromFormattedString(value string, replRegexp *regexp.Regexp) (Decimal, error) { + parsedValue := replRegexp.ReplaceAllString(value, "") + d, err := NewFromString(parsedValue) + if err != nil { + return Decimal{}, err + } + return d, nil +} + +// RequireFromString returns a new Decimal from a string representation +// or panics if NewFromString would have returned an error. +// +// Example: +// +// d := RequireFromString("-123.45") +// d2 := RequireFromString(".0001") +// +func RequireFromString(value string) Decimal { + dec, err := NewFromString(value) + if err != nil { + panic(err) + } + return dec +} + +// NewFromFloat converts a float64 to Decimal. +// +// The converted number will contain the number of significant digits that can be +// represented in a float with reliable roundtrip. +// This is typically 15 digits, but may be more in some cases. +// See https://www.exploringbinary.com/decimal-precision-of-binary-floating-point-numbers/ for more information. +// +// For slightly faster conversion, use NewFromFloatWithExponent where you can specify the precision in absolute terms. 
+// +// NOTE: this will panic on NaN, +/-inf +func NewFromFloat(value float64) Decimal { + if value == 0 { + return New(0, 0) + } + return newFromFloat(value, math.Float64bits(value), &float64info) +} + +// NewFromFloat32 converts a float32 to Decimal. +// +// The converted number will contain the number of significant digits that can be +// represented in a float with reliable roundtrip. +// This is typically 6-8 digits depending on the input. +// See https://www.exploringbinary.com/decimal-precision-of-binary-floating-point-numbers/ for more information. +// +// For slightly faster conversion, use NewFromFloatWithExponent where you can specify the precision in absolute terms. +// +// NOTE: this will panic on NaN, +/-inf +func NewFromFloat32(value float32) Decimal { + if value == 0 { + return New(0, 0) + } + // XOR is workaround for https://github.com/golang/go/issues/26285 + a := math.Float32bits(value) ^ 0x80808080 + return newFromFloat(float64(value), uint64(a)^0x80808080, &float32info) +} + +func newFromFloat(val float64, bits uint64, flt *floatInfo) Decimal { + if math.IsNaN(val) || math.IsInf(val, 0) { + panic(fmt.Sprintf("Cannot create a Decimal from %v", val)) + } + exp := int(bits>>flt.mantbits) & (1<>(flt.expbits+flt.mantbits) != 0 + + roundShortest(&d, mant, exp, flt) + // If less than 19 digits, we can do calculation in an int64. + if d.nd < 19 { + tmp := int64(0) + m := int64(1) + for i := d.nd - 1; i >= 0; i-- { + tmp += m * int64(d.d[i]-'0') + m *= 10 + } + if d.neg { + tmp *= -1 + } + return Decimal{value: big.NewInt(tmp), exp: int32(d.dp) - int32(d.nd)} + } + dValue := new(big.Int) + dValue, ok := dValue.SetString(string(d.d[:d.nd]), 10) + if ok { + return Decimal{value: dValue, exp: int32(d.dp) - int32(d.nd)} + } + + return NewFromFloatWithExponent(val, int32(d.dp)-int32(d.nd)) +} + +// NewFromFloatWithExponent converts a float64 to Decimal, with an arbitrary +// number of fractional digits. 
+// +// Example: +// +// NewFromFloatWithExponent(123.456, -2).String() // output: "123.46" +// +func NewFromFloatWithExponent(value float64, exp int32) Decimal { + if math.IsNaN(value) || math.IsInf(value, 0) { + panic(fmt.Sprintf("Cannot create a Decimal from %v", value)) + } + + bits := math.Float64bits(value) + mant := bits & (1<<52 - 1) + exp2 := int32((bits >> 52) & (1<<11 - 1)) + sign := bits >> 63 + + if exp2 == 0 { + // specials + if mant == 0 { + return Decimal{} + } + // subnormal + exp2++ + } else { + // normal + mant |= 1 << 52 + } + + exp2 -= 1023 + 52 + + // normalizing base-2 values + for mant&1 == 0 { + mant = mant >> 1 + exp2++ + } + + // maximum number of fractional base-10 digits to represent 2^N exactly cannot be more than -N if N<0 + if exp < 0 && exp < exp2 { + if exp2 < 0 { + exp = exp2 + } else { + exp = 0 + } + } + + // representing 10^M * 2^N as 5^M * 2^(M+N) + exp2 -= exp + + temp := big.NewInt(1) + dMant := big.NewInt(int64(mant)) + + // applying 5^M + if exp > 0 { + temp = temp.SetInt64(int64(exp)) + temp = temp.Exp(fiveInt, temp, nil) + } else if exp < 0 { + temp = temp.SetInt64(-int64(exp)) + temp = temp.Exp(fiveInt, temp, nil) + dMant = dMant.Mul(dMant, temp) + temp = temp.SetUint64(1) + } + + // applying 2^(M+N) + if exp2 > 0 { + dMant = dMant.Lsh(dMant, uint(exp2)) + } else if exp2 < 0 { + temp = temp.Lsh(temp, uint(-exp2)) + } + + // rounding and downscaling + if exp > 0 || exp2 < 0 { + halfDown := new(big.Int).Rsh(temp, 1) + dMant = dMant.Add(dMant, halfDown) + dMant = dMant.Quo(dMant, temp) + } + + if sign == 1 { + dMant = dMant.Neg(dMant) + } + + return Decimal{ + value: dMant, + exp: exp, + } +} + +// Copy returns a copy of decimal with the same value and exponent, but a different pointer to value. +func (d Decimal) Copy() Decimal { + d.ensureInitialized() + return Decimal{ + value: &(*d.value), + exp: d.exp, + } +} + +// rescale returns a rescaled version of the decimal. 
Returned +// decimal may be less precise if the given exponent is bigger +// than the initial exponent of the Decimal. +// NOTE: this will truncate, NOT round +// +// Example: +// +// d := New(12345, -4) +// d2 := d.rescale(-1) +// d3 := d2.rescale(-4) +// println(d1) +// println(d2) +// println(d3) +// +// Output: +// +// 1.2345 +// 1.2 +// 1.2000 +// +func (d Decimal) rescale(exp int32) Decimal { + d.ensureInitialized() + + if d.exp == exp { + return Decimal{ + new(big.Int).Set(d.value), + d.exp, + } + } + + // NOTE(vadim): must convert exps to float64 before - to prevent overflow + diff := math.Abs(float64(exp) - float64(d.exp)) + value := new(big.Int).Set(d.value) + + expScale := new(big.Int).Exp(tenInt, big.NewInt(int64(diff)), nil) + if exp > d.exp { + value = value.Quo(value, expScale) + } else if exp < d.exp { + value = value.Mul(value, expScale) + } + + return Decimal{ + value: value, + exp: exp, + } +} + +// Abs returns the absolute value of the decimal. +func (d Decimal) Abs() Decimal { + if !d.IsNegative() { + return d + } + d.ensureInitialized() + d2Value := new(big.Int).Abs(d.value) + return Decimal{ + value: d2Value, + exp: d.exp, + } +} + +// Add returns d + d2. +func (d Decimal) Add(d2 Decimal) Decimal { + rd, rd2 := RescalePair(d, d2) + + d3Value := new(big.Int).Add(rd.value, rd2.value) + return Decimal{ + value: d3Value, + exp: rd.exp, + } +} + +// Sub returns d - d2. +func (d Decimal) Sub(d2 Decimal) Decimal { + rd, rd2 := RescalePair(d, d2) + + d3Value := new(big.Int).Sub(rd.value, rd2.value) + return Decimal{ + value: d3Value, + exp: rd.exp, + } +} + +// Neg returns -d. +func (d Decimal) Neg() Decimal { + d.ensureInitialized() + val := new(big.Int).Neg(d.value) + return Decimal{ + value: val, + exp: d.exp, + } +} + +// Mul returns d * d2. 
+func (d Decimal) Mul(d2 Decimal) Decimal { + d.ensureInitialized() + d2.ensureInitialized() + + expInt64 := int64(d.exp) + int64(d2.exp) + if expInt64 > math.MaxInt32 || expInt64 < math.MinInt32 { + // NOTE(vadim): better to panic than give incorrect results, as + // Decimals are usually used for money + panic(fmt.Sprintf("exponent %v overflows an int32!", expInt64)) + } + + d3Value := new(big.Int).Mul(d.value, d2.value) + return Decimal{ + value: d3Value, + exp: int32(expInt64), + } +} + +// Shift shifts the decimal in base 10. +// It shifts left when shift is positive and right if shift is negative. +// In simpler terms, the given value for shift is added to the exponent +// of the decimal. +func (d Decimal) Shift(shift int32) Decimal { + d.ensureInitialized() + return Decimal{ + value: new(big.Int).Set(d.value), + exp: d.exp + shift, + } +} + +// Div returns d / d2. If it doesn't divide exactly, the result will have +// DivisionPrecision digits after the decimal point. +func (d Decimal) Div(d2 Decimal) Decimal { + return d.DivRound(d2, int32(DivisionPrecision)) +} + +// QuoRem does divsion with remainder +// d.QuoRem(d2,precision) returns quotient q and remainder r such that +// d = d2 * q + r, q an integer multiple of 10^(-precision) +// 0 <= r < abs(d2) * 10 ^(-precision) if d>=0 +// 0 >= r > -abs(d2) * 10 ^(-precision) if d<0 +// Note that precision<0 is allowed as input. 
+func (d Decimal) QuoRem(d2 Decimal, precision int32) (Decimal, Decimal) { + d.ensureInitialized() + d2.ensureInitialized() + if d2.value.Sign() == 0 { + panic("decimal division by 0") + } + scale := -precision + e := int64(d.exp - d2.exp - scale) + if e > math.MaxInt32 || e < math.MinInt32 { + panic("overflow in decimal QuoRem") + } + var aa, bb, expo big.Int + var scalerest int32 + // d = a 10^ea + // d2 = b 10^eb + if e < 0 { + aa = *d.value + expo.SetInt64(-e) + bb.Exp(tenInt, &expo, nil) + bb.Mul(d2.value, &bb) + scalerest = d.exp + // now aa = a + // bb = b 10^(scale + eb - ea) + } else { + expo.SetInt64(e) + aa.Exp(tenInt, &expo, nil) + aa.Mul(d.value, &aa) + bb = *d2.value + scalerest = scale + d2.exp + // now aa = a ^ (ea - eb - scale) + // bb = b + } + var q, r big.Int + q.QuoRem(&aa, &bb, &r) + dq := Decimal{value: &q, exp: scale} + dr := Decimal{value: &r, exp: scalerest} + return dq, dr +} + +// DivRound divides and rounds to a given precision +// i.e. to an integer multiple of 10^(-precision) +// for a positive quotient digit 5 is rounded up, away from 0 +// if the quotient is negative then digit 5 is rounded down, away from 0 +// Note that precision<0 is allowed as input. +func (d Decimal) DivRound(d2 Decimal, precision int32) Decimal { + // QuoRem already checks initialization + q, r := d.QuoRem(d2, precision) + // the actual rounding decision is based on comparing r*10^precision and d2/2 + // instead compare 2 r 10 ^precision and d2 + var rv2 big.Int + rv2.Abs(r.value) + rv2.Lsh(&rv2, 1) + // now rv2 = abs(r.value) * 2 + r2 := Decimal{value: &rv2, exp: r.exp + precision} + // r2 is now 2 * r * 10 ^ precision + var c = r2.Cmp(d2.Abs()) + + if c < 0 { + return q + } + + if d.value.Sign()*d2.value.Sign() < 0 { + return q.Sub(New(1, -precision)) + } + + return q.Add(New(1, -precision)) +} + +// Mod returns d % d2. 
+func (d Decimal) Mod(d2 Decimal) Decimal { + quo := d.Div(d2).Truncate(0) + return d.Sub(d2.Mul(quo)) +} + +// Pow returns d to the power d2 +func (d Decimal) Pow(d2 Decimal) Decimal { + var temp Decimal + if d2.IntPart() == 0 { + return NewFromFloat(1) + } + temp = d.Pow(d2.Div(NewFromFloat(2))) + if d2.IntPart()%2 == 0 { + return temp.Mul(temp) + } + if d2.IntPart() > 0 { + return temp.Mul(temp).Mul(d) + } + return temp.Mul(temp).Div(d) +} + +// ExpHullAbrham calculates the natural exponent of decimal (e to the power of d) using Hull-Abraham algorithm. +// OverallPrecision argument specifies the overall precision of the result (integer part + decimal part). +// +// ExpHullAbrham is faster than ExpTaylor for small precision values, but it is much slower for large precision values. +// +// Example: +// +// NewFromFloat(26.1).ExpHullAbrham(2).String() // output: "220000000000" +// NewFromFloat(26.1).ExpHullAbrham(20).String() // output: "216314672147.05767284" +// +func (d Decimal) ExpHullAbrham(overallPrecision uint32) (Decimal, error) { + // Algorithm based on Variable precision exponential function. + // ACM Transactions on Mathematical Software by T. E. Hull & A. Abrham. + if d.IsZero() { + return Decimal{oneInt, 0}, nil + } + + currentPrecision := overallPrecision + + // Algorithm does not work if currentPrecision * 23 < |x|. + // Precision is automatically increased in such cases, so the value can be calculated precisely. + // If newly calculated precision is higher than ExpMaxIterations the currentPrecision will not be changed. 
+ f := d.Abs().InexactFloat64() + if ncp := f / 23; ncp > float64(currentPrecision) && ncp < float64(ExpMaxIterations) { + currentPrecision = uint32(math.Ceil(ncp)) + } + + // fail if abs(d) beyond an over/underflow threshold + overflowThreshold := New(23*int64(currentPrecision), 0) + if d.Abs().Cmp(overflowThreshold) > 0 { + return Decimal{}, fmt.Errorf("over/underflow threshold, exp(x) cannot be calculated precisely") + } + + // Return 1 if abs(d) small enough; this also avoids later over/underflow + overflowThreshold2 := New(9, -int32(currentPrecision)-1) + if d.Abs().Cmp(overflowThreshold2) <= 0 { + return Decimal{oneInt, d.exp}, nil + } + + // t is the smallest integer >= 0 such that the corresponding abs(d/k) < 1 + t := d.exp + int32(d.NumDigits()) // Add d.NumDigits because the paper assumes that d.value [0.1, 1) + + if t < 0 { + t = 0 + } + + k := New(1, t) // reduction factor + r := Decimal{new(big.Int).Set(d.value), d.exp - t} // reduced argument + p := int32(currentPrecision) + t + 2 // precision for calculating the sum + + // Determine n, the number of therms for calculating sum + // use first Newton step (1.435p - 1.182) / log10(p/abs(r)) + // for solving appropriate equation, along with directed + // roundings and simple rational bound for log10(p/abs(r)) + rf := r.Abs().InexactFloat64() + pf := float64(p) + nf := math.Ceil((1.453*pf - 1.182) / math.Log10(pf/rf)) + if nf > float64(ExpMaxIterations) || math.IsNaN(nf) { + return Decimal{}, fmt.Errorf("exact value cannot be calculated in <=ExpMaxIterations iterations") + } + n := int64(nf) + + tmp := New(0, 0) + sum := New(1, 0) + one := New(1, 0) + for i := n - 1; i > 0; i-- { + tmp.value.SetInt64(i) + sum = sum.Mul(r.DivRound(tmp, p)) + sum = sum.Add(one) + } + + ki := k.IntPart() + res := New(1, 0) + for i := ki; i > 0; i-- { + res = res.Mul(sum) + } + + resNumDigits := int32(res.NumDigits()) + + var roundDigits int32 + if resNumDigits > abs(res.exp) { + roundDigits = int32(currentPrecision) - 
resNumDigits - res.exp + } else { + roundDigits = int32(currentPrecision) + } + + res = res.Round(roundDigits) + + return res, nil +} + +// ExpTaylor calculates the natural exponent of decimal (e to the power of d) using Taylor series expansion. +// Precision argument specifies how precise the result must be (number of digits after decimal point). +// Negative precision is allowed. +// +// ExpTaylor is much faster for large precision values than ExpHullAbrham. +// +// Example: +// +// d, err := NewFromFloat(26.1).ExpTaylor(2).String() +// d.String() // output: "216314672147.06" +// +// NewFromFloat(26.1).ExpTaylor(20).String() +// d.String() // output: "216314672147.05767284062928674083" +// +// NewFromFloat(26.1).ExpTaylor(-10).String() +// d.String() // output: "220000000000" +// +func (d Decimal) ExpTaylor(precision int32) (Decimal, error) { + // Note(mwoss): Implementation can be optimized by exclusively using big.Int API only + if d.IsZero() { + return Decimal{oneInt, 0}.Round(precision), nil + } + + var epsilon Decimal + var divPrecision int32 + if precision < 0 { + epsilon = New(1, -1) + divPrecision = 8 + } else { + epsilon = New(1, -precision-1) + divPrecision = precision + 1 + } + + decAbs := d.Abs() + pow := d.Abs() + factorial := New(1, 0) + + result := New(1, 0) + + for i := int64(1); ; { + step := pow.DivRound(factorial, divPrecision) + result = result.Add(step) + + // Stop Taylor series when current step is smaller than epsilon + if step.Cmp(epsilon) < 0 { + break + } + + pow = pow.Mul(decAbs) + + i++ + + // Calculate next factorial number or retrieve cached value + if len(factorials) >= int(i) && !factorials[i-1].IsZero() { + factorial = factorials[i-1] + } else { + // To avoid any race conditions, firstly the zero value is appended to a slice to create + // a spot for newly calculated factorial. After that, the zero value is replaced by calculated + // factorial using the index notation. 
+ factorial = factorials[i-2].Mul(New(i, 0)) + factorials = append(factorials, Zero) + factorials[i-1] = factorial + } + } + + if d.Sign() < 0 { + result = New(1, 0).DivRound(result, precision+1) + } + + result = result.Round(precision) + return result, nil +} + +// NumDigits returns the number of digits of the decimal coefficient (d.Value) +// Note: Current implementation is extremely slow for large decimals and/or decimals with large fractional part +func (d Decimal) NumDigits() int { + // Note(mwoss): It can be optimized, unnecessary cast of big.Int to string + if d.IsNegative() { + return len(d.value.String()) - 1 + } + return len(d.value.String()) +} + +// IsInteger returns true when decimal can be represented as an integer value, otherwise, it returns false. +func (d Decimal) IsInteger() bool { + // The most typical case, all decimal with exponent higher or equal 0 can be represented as integer + if d.exp >= 0 { + return true + } + // When the exponent is negative we have to check every number after the decimal place + // If all of them are zeroes, we are sure that given decimal can be represented as an integer + var r big.Int + q := new(big.Int).Set(d.value) + for z := abs(d.exp); z > 0; z-- { + q.QuoRem(q, tenInt, &r) + if r.Cmp(zeroInt) != 0 { + return false + } + } + return true +} + +// Abs calculates absolute value of any int32. Used for calculating absolute value of decimal's exponent. +func abs(n int32) int32 { + if n < 0 { + return -n + } + return n +} + +// Cmp compares the numbers represented by d and d2 and returns: +// +// -1 if d < d2 +// 0 if d == d2 +// +1 if d > d2 +// +func (d Decimal) Cmp(d2 Decimal) int { + d.ensureInitialized() + d2.ensureInitialized() + + if d.exp == d2.exp { + return d.value.Cmp(d2.value) + } + + rd, rd2 := RescalePair(d, d2) + + return rd.value.Cmp(rd2.value) +} + +// Equal returns whether the numbers represented by d and d2 are equal. 
func (d Decimal) Equal(d2 Decimal) bool {
	return d.Cmp(d2) == 0
}

// Equals returns whether the numbers represented by d and d2 are equal.
//
// Deprecated: Use Equal instead.
func (d Decimal) Equals(d2 Decimal) bool {
	return d.Equal(d2)
}

// GreaterThan (GT) returns true when d is greater than d2.
func (d Decimal) GreaterThan(d2 Decimal) bool {
	return d.Cmp(d2) == 1
}

// GreaterThanOrEqual (GTE) returns true when d is greater than or equal to d2.
func (d Decimal) GreaterThanOrEqual(d2 Decimal) bool {
	cmp := d.Cmp(d2)
	return cmp == 1 || cmp == 0
}

// LessThan (LT) returns true when d is less than d2.
func (d Decimal) LessThan(d2 Decimal) bool {
	return d.Cmp(d2) == -1
}

// LessThanOrEqual (LTE) returns true when d is less than or equal to d2.
func (d Decimal) LessThanOrEqual(d2 Decimal) bool {
	cmp := d.Cmp(d2)
	return cmp == -1 || cmp == 0
}

// Sign returns:
//
//	-1 if d <  0
//	 0 if d == 0
//	+1 if d >  0
//
// A zero-value (uninitialized) Decimal is treated as 0.
func (d Decimal) Sign() int {
	if d.value == nil {
		return 0
	}
	return d.value.Sign()
}

// IsPositive return
//
//	true if d > 0
//	false if d == 0
//	false if d < 0
func (d Decimal) IsPositive() bool {
	return d.Sign() == 1
}

// IsNegative return
//
//	true if d < 0
//	false if d == 0
//	false if d > 0
func (d Decimal) IsNegative() bool {
	return d.Sign() == -1
}

// IsZero return
//
//	true if d == 0
//	false if d > 0
//	false if d < 0
func (d Decimal) IsZero() bool {
	return d.Sign() == 0
}

// Exponent returns the exponent, or scale component of the decimal.
func (d Decimal) Exponent() int32 {
	return d.exp
}

// Coefficient returns the coefficient of the decimal. It is scaled by 10^Exponent()
func (d Decimal) Coefficient() *big.Int {
	d.ensureInitialized()
	// we copy the coefficient so that mutating the result does not mutate the Decimal.
	return new(big.Int).Set(d.value)
}

// CoefficientInt64 returns the coefficient of the decimal as int64.
It is scaled by 10^Exponent() +// If coefficient cannot be represented in an int64, the result will be undefined. +func (d Decimal) CoefficientInt64() int64 { + d.ensureInitialized() + return d.value.Int64() +} + +// IntPart returns the integer component of the decimal. +func (d Decimal) IntPart() int64 { + scaledD := d.rescale(0) + return scaledD.value.Int64() +} + +// BigInt returns integer component of the decimal as a BigInt. +func (d Decimal) BigInt() *big.Int { + scaledD := d.rescale(0) + i := &big.Int{} + i.SetString(scaledD.String(), 10) + return i +} + +// BigFloat returns decimal as BigFloat. +// Be aware that casting decimal to BigFloat might cause a loss of precision. +func (d Decimal) BigFloat() *big.Float { + f := &big.Float{} + f.SetString(d.String()) + return f +} + +// Rat returns a rational number representation of the decimal. +func (d Decimal) Rat() *big.Rat { + d.ensureInitialized() + if d.exp <= 0 { + // NOTE(vadim): must negate after casting to prevent int32 overflow + denom := new(big.Int).Exp(tenInt, big.NewInt(-int64(d.exp)), nil) + return new(big.Rat).SetFrac(d.value, denom) + } + + mul := new(big.Int).Exp(tenInt, big.NewInt(int64(d.exp)), nil) + num := new(big.Int).Mul(d.value, mul) + return new(big.Rat).SetFrac(num, oneInt) +} + +// Float64 returns the nearest float64 value for d and a bool indicating +// whether f represents d exactly. +// For more details, see the documentation for big.Rat.Float64 +func (d Decimal) Float64() (f float64, exact bool) { + return d.Rat().Float64() +} + +// InexactFloat64 returns the nearest float64 value for d. +// It doesn't indicate if the returned value represents d exactly. +func (d Decimal) InexactFloat64() float64 { + f, _ := d.Float64() + return f +} + +// String returns the string representation of the decimal +// with the fixed point. 
+// +// Example: +// +// d := New(-12345, -3) +// println(d.String()) +// +// Output: +// +// -12.345 +// +func (d Decimal) String() string { + return d.string(true) +} + +// StringFixed returns a rounded fixed-point string with places digits after +// the decimal point. +// +// Example: +// +// NewFromFloat(0).StringFixed(2) // output: "0.00" +// NewFromFloat(0).StringFixed(0) // output: "0" +// NewFromFloat(5.45).StringFixed(0) // output: "5" +// NewFromFloat(5.45).StringFixed(1) // output: "5.5" +// NewFromFloat(5.45).StringFixed(2) // output: "5.45" +// NewFromFloat(5.45).StringFixed(3) // output: "5.450" +// NewFromFloat(545).StringFixed(-1) // output: "550" +// +func (d Decimal) StringFixed(places int32) string { + rounded := d.Round(places) + return rounded.string(false) +} + +// StringFixedBank returns a banker rounded fixed-point string with places digits +// after the decimal point. +// +// Example: +// +// NewFromFloat(0).StringFixedBank(2) // output: "0.00" +// NewFromFloat(0).StringFixedBank(0) // output: "0" +// NewFromFloat(5.45).StringFixedBank(0) // output: "5" +// NewFromFloat(5.45).StringFixedBank(1) // output: "5.4" +// NewFromFloat(5.45).StringFixedBank(2) // output: "5.45" +// NewFromFloat(5.45).StringFixedBank(3) // output: "5.450" +// NewFromFloat(545).StringFixedBank(-1) // output: "540" +// +func (d Decimal) StringFixedBank(places int32) string { + rounded := d.RoundBank(places) + return rounded.string(false) +} + +// StringFixedCash returns a Swedish/Cash rounded fixed-point string. For +// more details see the documentation at function RoundCash. +func (d Decimal) StringFixedCash(interval uint8) string { + rounded := d.RoundCash(interval) + return rounded.string(false) +} + +// Round rounds the decimal to places decimal places. +// If places < 0, it will round the integer part to the nearest 10^(-places). 
//
// Example:
//
// 	   NewFromFloat(5.45).Round(1).String() // output: "5.5"
// 	   NewFromFloat(545).Round(-1).String() // output: "550"
//
func (d Decimal) Round(places int32) Decimal {
	// Already at the requested scale: nothing to do.
	if d.exp == -places {
		return d
	}
	// truncate to places + 1
	ret := d.rescale(-places - 1)

	// add sign(d) * 0.5
	if ret.value.Sign() < 0 {
		ret.value.Sub(ret.value, fiveInt)
	} else {
		ret.value.Add(ret.value, fiveInt)
	}

	// floor for positive numbers, ceil for negative numbers
	_, m := ret.value.DivMod(ret.value, tenInt, new(big.Int))
	ret.exp++
	if ret.value.Sign() < 0 && m.Cmp(zeroInt) != 0 {
		ret.value.Add(ret.value, oneInt)
	}

	return ret
}

// RoundCeil rounds the decimal towards +infinity.
//
// Example:
//
// 	   NewFromFloat(545).RoundCeil(-2).String()   // output: "600"
// 	   NewFromFloat(500).RoundCeil(-2).String()   // output: "500"
// 	   NewFromFloat(1.1001).RoundCeil(2).String() // output: "1.11"
// 	   NewFromFloat(-1.454).RoundCeil(1).String() // output: "-1.4"
//
func (d Decimal) RoundCeil(places int32) Decimal {
	if d.exp >= -places {
		return d
	}

	rescaled := d.rescale(-places)
	if d.Equal(rescaled) {
		return d
	}

	// Truncation already moved negative values toward +infinity; only
	// positive values need the extra step up.
	if d.value.Sign() > 0 {
		rescaled.value.Add(rescaled.value, oneInt)
	}

	return rescaled
}

// RoundFloor rounds the decimal towards -infinity.
//
// Example:
//
// 	   NewFromFloat(545).RoundFloor(-2).String()   // output: "500"
// 	   NewFromFloat(-500).RoundFloor(-2).String()  // output: "-500"
// 	   NewFromFloat(1.1001).RoundFloor(2).String() // output: "1.1"
// 	   NewFromFloat(-1.454).RoundFloor(1).String() // output: "-1.5"
//
func (d Decimal) RoundFloor(places int32) Decimal {
	if d.exp >= -places {
		return d
	}

	rescaled := d.rescale(-places)
	if d.Equal(rescaled) {
		return d
	}

	// Truncation already moved positive values toward -infinity; only
	// negative values need the extra step down.
	if d.value.Sign() < 0 {
		rescaled.value.Sub(rescaled.value, oneInt)
	}

	return rescaled
}

// RoundUp rounds the decimal away from zero.
//
// Example:
//
// 	   NewFromFloat(545).RoundUp(-2).String()   // output: "600"
// 	   NewFromFloat(500).RoundUp(-2).String()   // output: "500"
// 	   NewFromFloat(1.1001).RoundUp(2).String() // output: "1.11"
// 	   NewFromFloat(-1.454).RoundUp(1).String() // output: "-1.5"
//
func (d Decimal) RoundUp(places int32) Decimal {
	if d.exp >= -places {
		return d
	}

	rescaled := d.rescale(-places)
	if d.Equal(rescaled) {
		return d
	}

	// Truncation moved the value toward zero; step one ULP back out,
	// matching the sign of the value.
	if d.value.Sign() > 0 {
		rescaled.value.Add(rescaled.value, oneInt)
	} else if d.value.Sign() < 0 {
		rescaled.value.Sub(rescaled.value, oneInt)
	}

	return rescaled
}

// RoundDown rounds the decimal towards zero.
//
// Example:
//
// 	   NewFromFloat(545).RoundDown(-2).String()   // output: "500"
// 	   NewFromFloat(-500).RoundDown(-2).String()  // output: "-500"
// 	   NewFromFloat(1.1001).RoundDown(2).String() // output: "1.1"
// 	   NewFromFloat(-1.454).RoundDown(1).String() // output: "-1.4"
//
func (d Decimal) RoundDown(places int32) Decimal {
	if d.exp >= -places {
		return d
	}

	// rescale truncates toward zero, which is exactly this rounding mode.
	rescaled := d.rescale(-places)
	if d.Equal(rescaled) {
		return d
	}
	return rescaled
}

// RoundBank rounds the decimal to places decimal places.
// If the final digit to round is equidistant from the nearest two integers the
// rounded value is taken as the even number
//
// If places < 0, it will round the integer part to the nearest 10^(-places).
//
// Examples:
//
// 	   NewFromFloat(5.45).RoundBank(1).String() // output: "5.4"
// 	   NewFromFloat(545).RoundBank(-1).String() // output: "540"
// 	   NewFromFloat(5.46).RoundBank(1).String() // output: "5.5"
// 	   NewFromFloat(546).RoundBank(-1).String() // output: "550"
// 	   NewFromFloat(5.55).RoundBank(1).String() // output: "5.6"
// 	   NewFromFloat(555).RoundBank(-1).String() // output: "560"
//
func (d Decimal) RoundBank(places int32) Decimal {

	// Start from half-away-from-zero rounding, then correct the exact-half
	// case back toward the even digit.
	round := d.Round(places)
	remainder := d.Sub(round).Abs()

	half := New(5, -places-1)
	// Exactly half-way and the rounded last digit is odd: undo one step so
	// the result lands on the even neighbour (banker's rounding).
	if remainder.Cmp(half) == 0 && round.value.Bit(0) != 0 {
		if round.value.Sign() < 0 {
			round.value.Add(round.value, oneInt)
		} else {
			round.value.Sub(round.value, oneInt)
		}
	}

	return round
}

// RoundCash aka Cash/Penny/öre rounding rounds decimal to a specific
// interval. The amount payable for a cash transaction is rounded to the nearest
// multiple of the minimum currency unit available. The following intervals are
// available: 5, 10, 25, 50 and 100; any other number throws a panic.
//
//	  5:   5 cent rounding 3.43 => 3.45
//	 10:  10 cent rounding 3.45 => 3.50 (5 gets rounded up)
//	 25:  25 cent rounding 3.41 => 3.50
//	 50:  50 cent rounding 3.75 => 4.00
//	100: 100 cent rounding 3.50 => 4.00
//
// For more details: https://en.wikipedia.org/wiki/Cash_rounding
func (d Decimal) RoundCash(interval uint8) Decimal {
	// iVal is 100/interval: scale so the target interval becomes a whole
	// cent, round, then scale back.
	var iVal *big.Int
	switch interval {
	case 5:
		iVal = twentyInt
	case 10:
		iVal = tenInt
	case 25:
		iVal = fourInt
	case 50:
		iVal = twoInt
	case 100:
		iVal = oneInt
	default:
		panic(fmt.Sprintf("Decimal does not support this Cash rounding interval `%d`. Supported: 5, 10, 25, 50, 100", interval))
	}
	dVal := Decimal{
		value: iVal,
	}

	// TODO: optimize those calculations to reduce the high allocations (~29 allocs).
	return d.Mul(dVal).Round(0).Div(dVal).Truncate(2)
}

// Floor returns the nearest integer value less than or equal to d.
+func (d Decimal) Floor() Decimal { + d.ensureInitialized() + + if d.exp >= 0 { + return d + } + + exp := big.NewInt(10) + + // NOTE(vadim): must negate after casting to prevent int32 overflow + exp.Exp(exp, big.NewInt(-int64(d.exp)), nil) + + z := new(big.Int).Div(d.value, exp) + return Decimal{value: z, exp: 0} +} + +// Ceil returns the nearest integer value greater than or equal to d. +func (d Decimal) Ceil() Decimal { + d.ensureInitialized() + + if d.exp >= 0 { + return d + } + + exp := big.NewInt(10) + + // NOTE(vadim): must negate after casting to prevent int32 overflow + exp.Exp(exp, big.NewInt(-int64(d.exp)), nil) + + z, m := new(big.Int).DivMod(d.value, exp, new(big.Int)) + if m.Cmp(zeroInt) != 0 { + z.Add(z, oneInt) + } + return Decimal{value: z, exp: 0} +} + +// Truncate truncates off digits from the number, without rounding. +// +// NOTE: precision is the last digit that will not be truncated (must be >= 0). +// +// Example: +// +// decimal.NewFromString("123.456").Truncate(2).String() // "123.45" +// +func (d Decimal) Truncate(precision int32) Decimal { + d.ensureInitialized() + if precision >= 0 && -precision > d.exp { + return d.rescale(-precision) + } + return d +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (d *Decimal) UnmarshalJSON(decimalBytes []byte) error { + if string(decimalBytes) == "null" { + return nil + } + + str, err := unquoteIfQuoted(decimalBytes) + if err != nil { + return fmt.Errorf("error decoding string '%s': %s", decimalBytes, err) + } + + decimal, err := NewFromString(str) + *d = decimal + if err != nil { + return fmt.Errorf("error decoding string '%s': %s", str, err) + } + return nil +} + +// MarshalJSON implements the json.Marshaler interface. 
+func (d Decimal) MarshalJSON() ([]byte, error) { + var str string + if MarshalJSONWithoutQuotes { + str = d.String() + } else { + str = "\"" + d.String() + "\"" + } + return []byte(str), nil +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. As a string representation +// is already used when encoding to text, this method stores that string as []byte +func (d *Decimal) UnmarshalBinary(data []byte) error { + // Verify we have at least 4 bytes for the exponent. The GOB encoded value + // may be empty. + if len(data) < 4 { + return fmt.Errorf("error decoding binary %v: expected at least 4 bytes, got %d", data, len(data)) + } + + // Extract the exponent + d.exp = int32(binary.BigEndian.Uint32(data[:4])) + + // Extract the value + d.value = new(big.Int) + if err := d.value.GobDecode(data[4:]); err != nil { + return fmt.Errorf("error decoding binary %v: %s", data, err) + } + + return nil +} + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (d Decimal) MarshalBinary() (data []byte, err error) { + // Write the exponent first since it's a fixed size + v1 := make([]byte, 4) + binary.BigEndian.PutUint32(v1, uint32(d.exp)) + + // Add the value + var v2 []byte + if v2, err = d.value.GobEncode(); err != nil { + return + } + + // Return the byte array + data = append(v1, v2...) + return +} + +// Scan implements the sql.Scanner interface for database deserialization. +func (d *Decimal) Scan(value interface{}) error { + // first try to see if the data is stored in database as a Numeric datatype + switch v := value.(type) { + + case float32: + *d = NewFromFloat(float64(v)) + return nil + + case float64: + // numeric in sqlite3 sends us float64 + *d = NewFromFloat(v) + return nil + + case int64: + // at least in sqlite3 when the value is 0 in db, the data is sent + // to us as an int64 instead of a float64 ... 
+ *d = New(v, 0) + return nil + + default: + // default is trying to interpret value stored as string + str, err := unquoteIfQuoted(v) + if err != nil { + return err + } + *d, err = NewFromString(str) + return err + } +} + +// Value implements the driver.Valuer interface for database serialization. +func (d Decimal) Value() (driver.Value, error) { + return d.String(), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface for XML +// deserialization. +func (d *Decimal) UnmarshalText(text []byte) error { + str := string(text) + + dec, err := NewFromString(str) + *d = dec + if err != nil { + return fmt.Errorf("error decoding string '%s': %s", str, err) + } + + return nil +} + +// MarshalText implements the encoding.TextMarshaler interface for XML +// serialization. +func (d Decimal) MarshalText() (text []byte, err error) { + return []byte(d.String()), nil +} + +// GobEncode implements the gob.GobEncoder interface for gob serialization. +func (d Decimal) GobEncode() ([]byte, error) { + return d.MarshalBinary() +} + +// GobDecode implements the gob.GobDecoder interface for gob serialization. +func (d *Decimal) GobDecode(data []byte) error { + return d.UnmarshalBinary(data) +} + +// StringScaled first scales the decimal then calls .String() on it. +// NOTE: buggy, unintuitive, and DEPRECATED! Use StringFixed instead. +func (d Decimal) StringScaled(exp int32) string { + return d.rescale(exp).String() +} + +func (d Decimal) string(trimTrailingZeros bool) string { + if d.exp >= 0 { + return d.rescale(0).value.String() + } + + abs := new(big.Int).Abs(d.value) + str := abs.String() + + var intPart, fractionalPart string + + // NOTE(vadim): this cast to int will cause bugs if d.exp == INT_MIN + // and you are on a 32-bit machine. Won't fix this super-edge case. 
+ dExpInt := int(d.exp) + if len(str) > -dExpInt { + intPart = str[:len(str)+dExpInt] + fractionalPart = str[len(str)+dExpInt:] + } else { + intPart = "0" + + num0s := -dExpInt - len(str) + fractionalPart = strings.Repeat("0", num0s) + str + } + + if trimTrailingZeros { + i := len(fractionalPart) - 1 + for ; i >= 0; i-- { + if fractionalPart[i] != '0' { + break + } + } + fractionalPart = fractionalPart[:i+1] + } + + number := intPart + if len(fractionalPart) > 0 { + number += "." + fractionalPart + } + + if d.value.Sign() < 0 { + return "-" + number + } + + return number +} + +func (d *Decimal) ensureInitialized() { + if d.value == nil { + d.value = new(big.Int) + } +} + +// Min returns the smallest Decimal that was passed in the arguments. +// +// To call this function with an array, you must do: +// +// Min(arr[0], arr[1:]...) +// +// This makes it harder to accidentally call Min with 0 arguments. +func Min(first Decimal, rest ...Decimal) Decimal { + ans := first + for _, item := range rest { + if item.Cmp(ans) < 0 { + ans = item + } + } + return ans +} + +// Max returns the largest Decimal that was passed in the arguments. +// +// To call this function with an array, you must do: +// +// Max(arr[0], arr[1:]...) +// +// This makes it harder to accidentally call Max with 0 arguments. +func Max(first Decimal, rest ...Decimal) Decimal { + ans := first + for _, item := range rest { + if item.Cmp(ans) > 0 { + ans = item + } + } + return ans +} + +// Sum returns the combined total of the provided first and rest Decimals +func Sum(first Decimal, rest ...Decimal) Decimal { + total := first + for _, item := range rest { + total = total.Add(item) + } + + return total +} + +// Avg returns the average value of the provided first and rest Decimals +func Avg(first Decimal, rest ...Decimal) Decimal { + count := New(int64(len(rest)+1), 0) + sum := Sum(first, rest...) 
+ return sum.Div(count) +} + +// RescalePair rescales two decimals to common exponential value (minimal exp of both decimals) +func RescalePair(d1 Decimal, d2 Decimal) (Decimal, Decimal) { + d1.ensureInitialized() + d2.ensureInitialized() + + if d1.exp == d2.exp { + return d1, d2 + } + + baseScale := min(d1.exp, d2.exp) + if baseScale != d1.exp { + return d1.rescale(baseScale), d2 + } + return d1, d2.rescale(baseScale) +} + +func min(x, y int32) int32 { + if x >= y { + return y + } + return x +} + +func unquoteIfQuoted(value interface{}) (string, error) { + var bytes []byte + + switch v := value.(type) { + case string: + bytes = []byte(v) + case []byte: + bytes = v + default: + return "", fmt.Errorf("could not convert value '%+v' to byte array of type '%T'", + value, value) + } + + // If the amount is quoted, strip the quotes + if len(bytes) > 2 && bytes[0] == '"' && bytes[len(bytes)-1] == '"' { + bytes = bytes[1 : len(bytes)-1] + } + return string(bytes), nil +} + +// NullDecimal represents a nullable decimal with compatibility for +// scanning null values from the database. +type NullDecimal struct { + Decimal Decimal + Valid bool +} + +func NewNullDecimal(d Decimal) NullDecimal { + return NullDecimal{ + Decimal: d, + Valid: true, + } +} + +// Scan implements the sql.Scanner interface for database deserialization. +func (d *NullDecimal) Scan(value interface{}) error { + if value == nil { + d.Valid = false + return nil + } + d.Valid = true + return d.Decimal.Scan(value) +} + +// Value implements the driver.Valuer interface for database serialization. +func (d NullDecimal) Value() (driver.Value, error) { + if !d.Valid { + return nil, nil + } + return d.Decimal.Value() +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
+func (d *NullDecimal) UnmarshalJSON(decimalBytes []byte) error { + if string(decimalBytes) == "null" { + d.Valid = false + return nil + } + d.Valid = true + return d.Decimal.UnmarshalJSON(decimalBytes) +} + +// MarshalJSON implements the json.Marshaler interface. +func (d NullDecimal) MarshalJSON() ([]byte, error) { + if !d.Valid { + return []byte("null"), nil + } + return d.Decimal.MarshalJSON() +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface for XML +// deserialization +func (d *NullDecimal) UnmarshalText(text []byte) error { + str := string(text) + + // check for empty XML or XML without body e.g., + if str == "" { + d.Valid = false + return nil + } + if err := d.Decimal.UnmarshalText(text); err != nil { + d.Valid = false + return err + } + d.Valid = true + return nil +} + +// MarshalText implements the encoding.TextMarshaler interface for XML +// serialization. +func (d NullDecimal) MarshalText() (text []byte, err error) { + if !d.Valid { + return []byte{}, nil + } + return d.Decimal.MarshalText() +} + +// Trig functions + +// Atan returns the arctangent, in radians, of x. 
func (d Decimal) Atan() Decimal {
	// atan(0) = 0 and atan is odd: atan(-x) = -atan(x), so reduce to the
	// positive-argument helper satan.
	if d.Equal(NewFromFloat(0.0)) {
		return d
	}
	if d.GreaterThan(NewFromFloat(0.0)) {
		return d.satan()
	}
	return d.Neg().satan().Neg()
}

// xatan evaluates a rational-polynomial approximation of atan(d), valid for
// the reduced range handled by satan. Coefficients match the Go runtime's
// math.atan implementation (Cephes-derived).
func (d Decimal) xatan() Decimal {
	P0 := NewFromFloat(-8.750608600031904122785e-01)
	P1 := NewFromFloat(-1.615753718733365076637e+01)
	P2 := NewFromFloat(-7.500855792314704667340e+01)
	P3 := NewFromFloat(-1.228866684490136173410e+02)
	P4 := NewFromFloat(-6.485021904942025371773e+01)
	Q0 := NewFromFloat(2.485846490142306297962e+01)
	Q1 := NewFromFloat(1.650270098316988542046e+02)
	Q2 := NewFromFloat(4.328810604912902668951e+02)
	Q3 := NewFromFloat(4.853903996359136964868e+02)
	Q4 := NewFromFloat(1.945506571482613964425e+02)
	z := d.Mul(d)
	// Numerator and denominator polynomials in z = d*d (Horner form).
	b1 := P0.Mul(z).Add(P1).Mul(z).Add(P2).Mul(z).Add(P3).Mul(z).Add(P4).Mul(z)
	b2 := z.Add(Q0).Mul(z).Add(Q1).Mul(z).Add(Q2).Mul(z).Add(Q3).Mul(z).Add(Q4)
	z = b1.Div(b2)
	z = d.Mul(z).Add(d)
	return z
}

// satan reduces its argument (known to be positive)
// to the range [0, 0.66] and calls xatan.
func (d Decimal) satan() Decimal {
	Morebits := NewFromFloat(6.123233995736765886130e-17) // pi/2 = PIO2 + Morebits
	Tan3pio8 := NewFromFloat(2.41421356237309504880)      // tan(3*pi/8)
	pi := NewFromFloat(3.14159265358979323846264338327950288419716939937510582097494459)

	// Small arguments are already in xatan's range.
	if d.LessThanOrEqual(NewFromFloat(0.66)) {
		return d.xatan()
	}
	// Large arguments: atan(x) = pi/2 - atan(1/x).
	if d.GreaterThan(Tan3pio8) {
		return pi.Div(NewFromFloat(2.0)).Sub(NewFromFloat(1.0).Div(d).xatan()).Add(Morebits)
	}
	// Mid range: atan(x) = pi/4 + atan((x-1)/(x+1)).
	return pi.Div(NewFromFloat(4.0)).Add((d.Sub(NewFromFloat(1.0)).Div(d.Add(NewFromFloat(1.0)))).xatan()).Add(NewFromFloat(0.5).Mul(Morebits))
}

// sin coefficients (Taylor-style polynomial constants, ported from the Go
// runtime's math.sin implementation).
var _sin = [...]Decimal{
	NewFromFloat(1.58962301576546568060e-10), // 0x3de5d8fd1fd19ccd
	NewFromFloat(-2.50507477628578072866e-8), // 0xbe5ae5e5a9291f5d
	NewFromFloat(2.75573136213857245213e-6),  // 0x3ec71de3567d48a1
	NewFromFloat(-1.98412698295895385996e-4), // 0xbf2a01a019bfdf03
	NewFromFloat(8.33333333332211858878e-3),  // 0x3f8111111110f7d0
	NewFromFloat(-1.66666666666666307295e-1), // 0xbfc5555555555548
}

// Sin returns the sine of the radian argument x.
func (d Decimal) Sin() Decimal {
	PI4A := NewFromFloat(7.85398125648498535156e-1)                             // 0x3fe921fb40000000, Pi/4 split into three parts
	PI4B := NewFromFloat(3.77489470793079817668e-8)                             // 0x3e64442d00000000,
	PI4C := NewFromFloat(2.69515142907905952645e-15)                            // 0x3ce8469898cc5170,
	M4PI := NewFromFloat(1.273239544735162542821171882678754627704620361328125) // 4/pi

	if d.Equal(NewFromFloat(0.0)) {
		return d
	}
	// make argument positive but save the sign (sin is odd)
	sign := false
	if d.LessThan(NewFromFloat(0.0)) {
		d = d.Neg()
		sign = true
	}

	j := d.Mul(M4PI).IntPart()    // integer part of x/(Pi/4), as integer for tests on the phase angle
	y := NewFromFloat(float64(j)) // integer part of x/(Pi/4), as float

	// map zeros to origin
	if j&1 == 1 {
		j++
		y = y.Add(NewFromFloat(1.0))
	}
	j &= 7 // octant modulo 2Pi radians (360 degrees)
	// reflect in x axis
	if j > 3 {
		sign = !sign
		j -= 4
	}
	z := d.Sub(y.Mul(PI4A)).Sub(y.Mul(PI4B)).Sub(y.Mul(PI4C)) // Extended precision modular arithmetic
	zz := z.Mul(z)

	// Octants 1 and 2 use the cosine polynomial; the rest use the sine one.
	if j == 1 || j == 2 {
		w := zz.Mul(zz).Mul(_cos[0].Mul(zz).Add(_cos[1]).Mul(zz).Add(_cos[2]).Mul(zz).Add(_cos[3]).Mul(zz).Add(_cos[4]).Mul(zz).Add(_cos[5]))
		y = NewFromFloat(1.0).Sub(NewFromFloat(0.5).Mul(zz)).Add(w)
	} else {
		y = z.Add(z.Mul(zz).Mul(_sin[0].Mul(zz).Add(_sin[1]).Mul(zz).Add(_sin[2]).Mul(zz).Add(_sin[3]).Mul(zz).Add(_sin[4]).Mul(zz).Add(_sin[5])))
	}
	if sign {
		y = y.Neg()
	}
	return y
}

// cos coefficients (polynomial constants ported from the Go runtime's
// math.cos implementation).
var _cos = [...]Decimal{
	NewFromFloat(-1.13585365213876817300e-11), // 0xbda8fa49a0861a9b
	NewFromFloat(2.08757008419747316778e-9),   // 0x3e21ee9d7b4e3f05
	NewFromFloat(-2.75573141792967388112e-7),  // 0xbe927e4f7eac4bc6
	NewFromFloat(2.48015872888517045348e-5),   // 0x3efa01a019c844f5
	NewFromFloat(-1.38888888888730564116e-3),  // 0xbf56c16c16c14f91
	NewFromFloat(4.16666666666665929218e-2),   // 0x3fa555555555554b
}

// Cos returns the cosine of the radian argument x.
+func (d Decimal) Cos() Decimal { + + PI4A := NewFromFloat(7.85398125648498535156e-1) // 0x3fe921fb40000000, Pi/4 split into three parts + PI4B := NewFromFloat(3.77489470793079817668e-8) // 0x3e64442d00000000, + PI4C := NewFromFloat(2.69515142907905952645e-15) // 0x3ce8469898cc5170, + M4PI := NewFromFloat(1.273239544735162542821171882678754627704620361328125) // 4/pi + + // make argument positive + sign := false + if d.LessThan(NewFromFloat(0.0)) { + d = d.Neg() + } + + j := d.Mul(M4PI).IntPart() // integer part of x/(Pi/4), as integer for tests on the phase angle + y := NewFromFloat(float64(j)) // integer part of x/(Pi/4), as float + + // map zeros to origin + if j&1 == 1 { + j++ + y = y.Add(NewFromFloat(1.0)) + } + j &= 7 // octant modulo 2Pi radians (360 degrees) + // reflect in x axis + if j > 3 { + sign = !sign + j -= 4 + } + if j > 1 { + sign = !sign + } + + z := d.Sub(y.Mul(PI4A)).Sub(y.Mul(PI4B)).Sub(y.Mul(PI4C)) // Extended precision modular arithmetic + zz := z.Mul(z) + + if j == 1 || j == 2 { + y = z.Add(z.Mul(zz).Mul(_sin[0].Mul(zz).Add(_sin[1]).Mul(zz).Add(_sin[2]).Mul(zz).Add(_sin[3]).Mul(zz).Add(_sin[4]).Mul(zz).Add(_sin[5]))) + } else { + w := zz.Mul(zz).Mul(_cos[0].Mul(zz).Add(_cos[1]).Mul(zz).Add(_cos[2]).Mul(zz).Add(_cos[3]).Mul(zz).Add(_cos[4]).Mul(zz).Add(_cos[5])) + y = NewFromFloat(1.0).Sub(NewFromFloat(0.5).Mul(zz)).Add(w) + } + if sign { + y = y.Neg() + } + return y +} + +var _tanP = [...]Decimal{ + NewFromFloat(-1.30936939181383777646e+4), // 0xc0c992d8d24f3f38 + NewFromFloat(1.15351664838587416140e+6), // 0x413199eca5fc9ddd + NewFromFloat(-1.79565251976484877988e+7), // 0xc1711fead3299176 +} +var _tanQ = [...]Decimal{ + NewFromFloat(1.00000000000000000000e+0), + NewFromFloat(1.36812963470692954678e+4), //0x40cab8a5eeb36572 + NewFromFloat(-1.32089234440210967447e+6), //0xc13427bc582abc96 + NewFromFloat(2.50083801823357915839e+7), //0x4177d98fc2ead8ef + NewFromFloat(-5.38695755929454629881e+7), //0xc189afe03cbe5a31 +} + +// Tan returns the 
tangent of the radian argument x. +func (d Decimal) Tan() Decimal { + + PI4A := NewFromFloat(7.85398125648498535156e-1) // 0x3fe921fb40000000, Pi/4 split into three parts + PI4B := NewFromFloat(3.77489470793079817668e-8) // 0x3e64442d00000000, + PI4C := NewFromFloat(2.69515142907905952645e-15) // 0x3ce8469898cc5170, + M4PI := NewFromFloat(1.273239544735162542821171882678754627704620361328125) // 4/pi + + if d.Equal(NewFromFloat(0.0)) { + return d + } + + // make argument positive but save the sign + sign := false + if d.LessThan(NewFromFloat(0.0)) { + d = d.Neg() + sign = true + } + + j := d.Mul(M4PI).IntPart() // integer part of x/(Pi/4), as integer for tests on the phase angle + y := NewFromFloat(float64(j)) // integer part of x/(Pi/4), as float + + // map zeros to origin + if j&1 == 1 { + j++ + y = y.Add(NewFromFloat(1.0)) + } + + z := d.Sub(y.Mul(PI4A)).Sub(y.Mul(PI4B)).Sub(y.Mul(PI4C)) // Extended precision modular arithmetic + zz := z.Mul(z) + + if zz.GreaterThan(NewFromFloat(1e-14)) { + w := zz.Mul(_tanP[0].Mul(zz).Add(_tanP[1]).Mul(zz).Add(_tanP[2])) + x := zz.Add(_tanQ[1]).Mul(zz).Add(_tanQ[2]).Mul(zz).Add(_tanQ[3]).Mul(zz).Add(_tanQ[4]) + y = z.Add(z.Mul(w.Div(x))) + } else { + y = z + } + if j&2 == 2 { + y = NewFromFloat(-1.0).Div(y) + } + if sign { + y = y.Neg() + } + return y +} diff --git a/vendor/github.com/shopspring/decimal/rounding.go b/vendor/github.com/shopspring/decimal/rounding.go new file mode 100644 index 000000000..d4b0cd007 --- /dev/null +++ b/vendor/github.com/shopspring/decimal/rounding.go @@ -0,0 +1,160 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Multiprecision decimal numbers. +// For floating-point formatting only; not general purpose. +// Only operations are assign and (binary) left/right shift. 
+// Can do binary floating point in multiprecision decimal precisely +// because 2 divides 10; cannot do decimal floating point +// in multiprecision binary precisely. + +package decimal + +type floatInfo struct { + mantbits uint + expbits uint + bias int +} + +var float32info = floatInfo{23, 8, -127} +var float64info = floatInfo{52, 11, -1023} + +// roundShortest rounds d (= mant * 2^exp) to the shortest number of digits +// that will let the original floating point value be precisely reconstructed. +func roundShortest(d *decimal, mant uint64, exp int, flt *floatInfo) { + // If mantissa is zero, the number is zero; stop now. + if mant == 0 { + d.nd = 0 + return + } + + // Compute upper and lower such that any decimal number + // between upper and lower (possibly inclusive) + // will round to the original floating point number. + + // We may see at once that the number is already shortest. + // + // Suppose d is not denormal, so that 2^exp <= d < 10^dp. + // The closest shorter number is at least 10^(dp-nd) away. + // The lower/upper bounds computed below are at distance + // at most 2^(exp-mantbits). + // + // So the number is already shortest if 10^(dp-nd) > 2^(exp-mantbits), + // or equivalently log2(10)*(dp-nd) > exp-mantbits. + // It is true if 332/100*(dp-nd) >= exp-mantbits (log2(10) > 3.32). + minexp := flt.bias + 1 // minimum possible exponent + if exp > minexp && 332*(d.dp-d.nd) >= 100*(exp-int(flt.mantbits)) { + // The number is already shortest. + return + } + + // d = mant << (exp - mantbits) + // Next highest floating point number is mant+1 << exp-mantbits. + // Our upper bound is halfway between, mant*2+1 << exp-mantbits-1. 
+ upper := new(decimal) + upper.Assign(mant*2 + 1) + upper.Shift(exp - int(flt.mantbits) - 1) + + // d = mant << (exp - mantbits) + // Next lowest floating point number is mant-1 << exp-mantbits, + // unless mant-1 drops the significant bit and exp is not the minimum exp, + // in which case the next lowest is mant*2-1 << exp-mantbits-1. + // Either way, call it mantlo << explo-mantbits. + // Our lower bound is halfway between, mantlo*2+1 << explo-mantbits-1. + var mantlo uint64 + var explo int + if mant > 1<= d.nd { + break + } + li := ui - upper.dp + lower.dp + l := byte('0') // lower digit + if li >= 0 && li < lower.nd { + l = lower.d[li] + } + m := byte('0') // middle digit + if mi >= 0 { + m = d.d[mi] + } + u := byte('0') // upper digit + if ui < upper.nd { + u = upper.d[ui] + } + + // Okay to round down (truncate) if lower has a different digit + // or if lower is inclusive and is exactly the result of rounding + // down (i.e., and we have reached the final digit of lower). + okdown := l != m || inclusive && li+1 == lower.nd + + switch { + case upperdelta == 0 && m+1 < u: + // Example: + // m = 12345xxx + // u = 12347xxx + upperdelta = 2 + case upperdelta == 0 && m != u: + // Example: + // m = 12345xxx + // u = 12346xxx + upperdelta = 1 + case upperdelta == 1 && (m != '9' || u != '0'): + // Example: + // m = 1234598x + // u = 1234600x + upperdelta = 2 + } + // Okay to round up if upper has a different digit and either upper + // is inclusive or upper is bigger than the result of rounding up. + okup := upperdelta > 0 && (inclusive || upperdelta > 1 || ui+1 < upper.nd) + + // If it's okay to do either, then round to the nearest one. + // If it's okay to do only one, do it. 
+ switch { + case okdown && okup: + d.Round(mi + 1) + return + case okdown: + d.RoundDown(mi + 1) + return + case okup: + d.RoundUp(mi + 1) + return + } + } +} diff --git a/vendor/k8s.io/cli-runtime/LICENSE b/vendor/k8s.io/cli-runtime/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/k8s.io/cli-runtime/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/cli-runtime/pkg/genericiooptions/io_options.go b/vendor/k8s.io/cli-runtime/pkg/genericiooptions/io_options.go new file mode 100644 index 000000000..247b1c2ee --- /dev/null +++ b/vendor/k8s.io/cli-runtime/pkg/genericiooptions/io_options.go @@ -0,0 +1,56 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package genericiooptions + +import ( + "bytes" + "io" +) + +// IOStreams provides the standard names for iostreams. This is useful for embedding and for unit testing. 
+// Inconsistent and different names make it hard to read and review code +type IOStreams struct { + // In think, os.Stdin + In io.Reader + // Out think, os.Stdout + Out io.Writer + // ErrOut think, os.Stderr + ErrOut io.Writer +} + +// NewTestIOStreams returns a valid IOStreams and in, out, errout buffers for unit tests +func NewTestIOStreams() (IOStreams, *bytes.Buffer, *bytes.Buffer, *bytes.Buffer) { + in := &bytes.Buffer{} + out := &bytes.Buffer{} + errOut := &bytes.Buffer{} + + return IOStreams{ + In: in, + Out: out, + ErrOut: errOut, + }, in, out, errOut +} + +// NewTestIOStreamsDiscard returns a valid IOStreams that just discards +func NewTestIOStreamsDiscard() IOStreams { + in := &bytes.Buffer{} + return IOStreams{ + In: in, + Out: io.Discard, + ErrOut: io.Discard, + } +} diff --git a/vendor/modules.txt b/vendor/modules.txt index e5da3fe68..8971d9f6f 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -4,6 +4,12 @@ cel.dev/expr # github.com/NYTimes/gziphandler v1.1.1 ## explicit; go 1.11 github.com/NYTimes/gziphandler +# github.com/PaesslerAG/gval v1.2.3 +## explicit; go 1.15 +github.com/PaesslerAG/gval +# github.com/PaesslerAG/jsonpath v0.1.1 +## explicit +github.com/PaesslerAG/jsonpath # github.com/antlr4-go/antlr/v4 v4.13.0 ## explicit; go 1.20 github.com/antlr4-go/antlr/v4 @@ -341,6 +347,7 @@ github.com/openshift/library-go/pkg/controller/factory github.com/openshift/library-go/pkg/controller/fileobserver github.com/openshift/library-go/pkg/controller/manager github.com/openshift/library-go/pkg/crypto +github.com/openshift/library-go/pkg/manifestclient github.com/openshift/library-go/pkg/network github.com/openshift/library-go/pkg/operator/certrotation github.com/openshift/library-go/pkg/operator/condition @@ -396,6 +403,10 @@ github.com/openshift/library-go/pkg/operator/v1helpers github.com/openshift/library-go/pkg/serviceability github.com/openshift/library-go/test/library github.com/openshift/library-go/test/library/metrics +# 
github.com/openshift/multi-operator-manager v0.0.0-20250930141021-05cb0b9abdb4 +## explicit; go 1.24.0 +github.com/openshift/multi-operator-manager/pkg/library/libraryinputresources +github.com/openshift/multi-operator-manager/pkg/library/libraryoutputresources # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors @@ -434,6 +445,9 @@ github.com/prometheus/procfs/internal/util # github.com/robfig/cron v1.2.0 ## explicit github.com/robfig/cron +# github.com/shopspring/decimal v1.3.1 +## explicit; go 1.13 +github.com/shopspring/decimal # github.com/sirupsen/logrus v1.9.3 ## explicit; go 1.13 github.com/sirupsen/logrus @@ -1071,6 +1085,9 @@ k8s.io/apiserver/plugin/pkg/audit/webhook k8s.io/apiserver/plugin/pkg/authenticator/token/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook k8s.io/apiserver/plugin/pkg/authorizer/webhook/metrics +# k8s.io/cli-runtime v0.30.2 +## explicit; go 1.22.0 +k8s.io/cli-runtime/pkg/genericiooptions # k8s.io/client-go v0.34.1 ## explicit; go 1.24.0 k8s.io/client-go/applyconfigurations From 026d756413187c98bbbe6641686db980063b0cc8 Mon Sep 17 00:00:00 2001 From: Fabio Bertinatto Date: Fri, 28 Nov 2025 15:05:43 -0300 Subject: [PATCH 2/8] Add OM input-resources command stub Generated with Claude Code --- .../main.go | 5 +++ pkg/cmd/mom/input_resources_command.go | 40 +++++++++++++++++++ 2 files changed, 45 insertions(+) create mode 100644 pkg/cmd/mom/input_resources_command.go diff --git a/cmd/cluster-kube-controller-manager-operator/main.go b/cmd/cluster-kube-controller-manager-operator/main.go index 7a3640cd8..f56c8c777 100644 --- a/cmd/cluster-kube-controller-manager-operator/main.go +++ b/cmd/cluster-kube-controller-manager-operator/main.go @@ -6,6 +6,7 @@ import ( "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericiooptions" "k8s.io/component-base/cli" "github.com/openshift/library-go/pkg/operator/staticpod/certsyncpod" @@ -16,6 +17,7 @@ import ( 
"github.com/openshift/cluster-kube-controller-manager-operator/pkg/cmd/recoverycontroller" "github.com/openshift/cluster-kube-controller-manager-operator/pkg/cmd/render" "github.com/openshift/cluster-kube-controller-manager-operator/pkg/cmd/resourcegraph" + "github.com/openshift/cluster-kube-controller-manager-operator/pkg/cmd/mom" "github.com/openshift/cluster-kube-controller-manager-operator/pkg/operator" ) @@ -35,6 +37,8 @@ func NewSSCSCommand(ctx context.Context) *cobra.Command { }, } + ioStreams := genericiooptions.IOStreams{In: os.Stdin, Out: os.Stdout, ErrOut: os.Stderr} + cmd.AddCommand(operatorcmd.NewOperator()) cmd.AddCommand(render.NewRenderCommand(nil)) cmd.AddCommand(installerpod.NewInstaller(ctx)) @@ -42,6 +46,7 @@ func NewSSCSCommand(ctx context.Context) *cobra.Command { cmd.AddCommand(resourcegraph.NewResourceChainCommand()) cmd.AddCommand(certsyncpod.NewCertSyncControllerCommand(operator.CertConfigMaps, operator.CertSecrets)) cmd.AddCommand(recoverycontroller.NewCertRecoveryControllerCommand(ctx)) + cmd.AddCommand(mom.NewInputResourcesCommand(ioStreams)) return cmd } diff --git a/pkg/cmd/mom/input_resources_command.go b/pkg/cmd/mom/input_resources_command.go new file mode 100644 index 000000000..9fd02197d --- /dev/null +++ b/pkg/cmd/mom/input_resources_command.go @@ -0,0 +1,40 @@ +package mom + +import ( + "context" + + "github.com/openshift/multi-operator-manager/pkg/library/libraryinputresources" + "github.com/openshift/multi-operator-manager/pkg/library/libraryoutputresources" + "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericiooptions" +) + +func NewInputResourcesCommand(streams genericiooptions.IOStreams) *cobra.Command { + return libraryinputresources.NewInputResourcesCommand(runInputResources, runOutputResources, streams) +} + +func runInputResources(ctx context.Context) (*libraryinputresources.InputResources, error) { + return &libraryinputresources.InputResources{ + ApplyConfigurationResources: libraryinputresources.ResourceList{ 
+ ExactResources: []libraryinputresources.ExactResourceID{ + // TODO: Fill in discovered resources + }, + }, + }, nil +} + +// runOutputResources is defined here to support the input-resources command +// The actual implementation will be in output_resources_command.go +func runOutputResources(ctx context.Context) (*libraryoutputresources.OutputResources, error) { + return &libraryoutputresources.OutputResources{ + ConfigurationResources: libraryoutputresources.ResourceList{ + ExactResources: []libraryoutputresources.ExactResourceID{}, + }, + ManagementResources: libraryoutputresources.ResourceList{ + ExactResources: []libraryoutputresources.ExactResourceID{}, + }, + UserWorkloadResources: libraryoutputresources.ResourceList{ + ExactResources: []libraryoutputresources.ExactResourceID{}, + }, + }, nil +} From 020ebf63416094897d4c4631da9226ee6343b3e6 Mon Sep 17 00:00:00 2001 From: Fabio Bertinatto Date: Fri, 28 Nov 2025 15:06:42 -0300 Subject: [PATCH 3/8] Populate OM input-resources with discovered resources Generated with Claude Code --- pkg/cmd/mom/input_resources_command.go | 28 +++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/pkg/cmd/mom/input_resources_command.go b/pkg/cmd/mom/input_resources_command.go index 9fd02197d..ccde866f1 100644 --- a/pkg/cmd/mom/input_resources_command.go +++ b/pkg/cmd/mom/input_resources_command.go @@ -17,7 +17,33 @@ func runInputResources(ctx context.Context) (*libraryinputresources.InputResourc return &libraryinputresources.InputResources{ ApplyConfigurationResources: libraryinputresources.ResourceList{ ExactResources: []libraryinputresources.ExactResourceID{ - // TODO: Fill in discovered resources + // Operator CR + libraryinputresources.ExactLowLevelOperator("kubecontrollermanagers"), + + // Config resources + libraryinputresources.ExactConfigResource("infrastructures"), + libraryinputresources.ExactConfigResource("networks"), + libraryinputresources.ExactConfigResource("featuregates"), + 
libraryinputresources.ExactConfigResource("nodes"), + libraryinputresources.ExactConfigResource("proxies"), + libraryinputresources.ExactConfigResource("apiservers"), + libraryinputresources.ExactConfigResource("clusterversions"), + + // Namespaces + libraryinputresources.ExactNamespace("openshift-config"), + libraryinputresources.ExactNamespace("openshift-config-managed"), + libraryinputresources.ExactNamespace("openshift-kube-controller-manager"), + libraryinputresources.ExactNamespace("openshift-kube-controller-manager-operator"), + libraryinputresources.ExactNamespace("kube-system"), + libraryinputresources.ExactNamespace("openshift-infra"), + + // ConfigMaps that may be synced or referenced + libraryinputresources.ExactConfigMap("openshift-config", "cloud-provider-config"), + libraryinputresources.ExactConfigMap("openshift-config-managed", "kube-controller-cert-syncer-kubeconfig"), + libraryinputresources.ExactConfigMap("kube-system", "cluster-config-v1"), + + // Secrets that may be synced or referenced + libraryinputresources.ExactSecret("openshift-config", "cloud-credentials"), }, }, }, nil From e175379ee024f893fb0f060cd40f87ba06f19767 Mon Sep 17 00:00:00 2001 From: Fabio Bertinatto Date: Fri, 28 Nov 2025 15:07:44 -0300 Subject: [PATCH 4/8] Add OM output-resources command stub Generated with Claude Code --- cmd/cluster-kube-controller-manager-operator/main.go | 1 + pkg/cmd/mom/output_resources_command.go | 11 +++++++++++ 2 files changed, 12 insertions(+) create mode 100644 pkg/cmd/mom/output_resources_command.go diff --git a/cmd/cluster-kube-controller-manager-operator/main.go b/cmd/cluster-kube-controller-manager-operator/main.go index f56c8c777..5aaf5634b 100644 --- a/cmd/cluster-kube-controller-manager-operator/main.go +++ b/cmd/cluster-kube-controller-manager-operator/main.go @@ -47,6 +47,7 @@ func NewSSCSCommand(ctx context.Context) *cobra.Command { cmd.AddCommand(certsyncpod.NewCertSyncControllerCommand(operator.CertConfigMaps, operator.CertSecrets)) 
cmd.AddCommand(recoverycontroller.NewCertRecoveryControllerCommand(ctx)) cmd.AddCommand(mom.NewInputResourcesCommand(ioStreams)) + cmd.AddCommand(mom.NewOutputResourcesCommand(ioStreams)) return cmd } diff --git a/pkg/cmd/mom/output_resources_command.go b/pkg/cmd/mom/output_resources_command.go new file mode 100644 index 000000000..eadd0448f --- /dev/null +++ b/pkg/cmd/mom/output_resources_command.go @@ -0,0 +1,11 @@ +package mom + +import ( + "github.com/openshift/multi-operator-manager/pkg/library/libraryoutputresources" + "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericiooptions" +) + +func NewOutputResourcesCommand(streams genericiooptions.IOStreams) *cobra.Command { + return libraryoutputresources.NewOutputResourcesCommand(runOutputResources, streams) +} From 759f3625d7c59a22e52b2028bb71071a41540f13 Mon Sep 17 00:00:00 2001 From: Fabio Bertinatto Date: Fri, 28 Nov 2025 15:09:07 -0300 Subject: [PATCH 5/8] Populate OM output-resources with discovered resources Generated with Claude Code --- pkg/cmd/mom/input_resources_command.go | 78 +++++++++++++++++++++++++- 1 file changed, 75 insertions(+), 3 deletions(-) diff --git a/pkg/cmd/mom/input_resources_command.go b/pkg/cmd/mom/input_resources_command.go index ccde866f1..3cf9a020a 100644 --- a/pkg/cmd/mom/input_resources_command.go +++ b/pkg/cmd/mom/input_resources_command.go @@ -50,17 +50,89 @@ func runInputResources(ctx context.Context) (*libraryinputresources.InputResourc } // runOutputResources is defined here to support the input-resources command -// The actual implementation will be in output_resources_command.go +// This is shared with output_resources_command.go func runOutputResources(ctx context.Context) (*libraryoutputresources.OutputResources, error) { return &libraryoutputresources.OutputResources{ ConfigurationResources: libraryoutputresources.ResourceList{ ExactResources: []libraryoutputresources.ExactResourceID{}, }, ManagementResources: libraryoutputresources.ResourceList{ - 
ExactResources: []libraryoutputresources.ExactResourceID{}, + ExactResources: []libraryoutputresources.ExactResourceID{ + // ClusterOperator status + libraryoutputresources.ExactClusterOperator("kube-controller-manager"), + + // Namespaces managed by the operator + libraryoutputresources.ExactNamespace("openshift-kube-controller-manager"), + libraryoutputresources.ExactNamespace("openshift-kube-controller-manager-operator"), + libraryoutputresources.ExactNamespace("openshift-infra"), + + // Operator deployment and service + libraryoutputresources.ExactDeployment("openshift-kube-controller-manager-operator", "kube-controller-manager-operator"), + libraryoutputresources.ExactService("openshift-kube-controller-manager-operator", "kube-controller-manager-operator"), + libraryoutputresources.ExactServiceAccount("openshift-kube-controller-manager-operator", "kube-controller-manager-operator"), + + // Static pod resources in target namespace + libraryoutputresources.ExactService("openshift-kube-controller-manager", "kube-controller-manager"), + libraryoutputresources.ExactServiceAccount("openshift-kube-controller-manager", "kube-controller-manager"), + libraryoutputresources.ExactServiceAccount("openshift-kube-controller-manager", "localhost-recovery-client"), + libraryoutputresources.ExactServiceAccount("openshift-kube-controller-manager", "kube-controller-manager-sa"), + + // ConfigMaps + libraryoutputresources.ExactConfigMap("openshift-kube-controller-manager", "config"), + libraryoutputresources.ExactConfigMap("openshift-kube-controller-manager", "kube-controller-manager-pod"), + libraryoutputresources.ExactConfigMap("openshift-kube-controller-manager", "cluster-policy-controller-config"), + libraryoutputresources.ExactConfigMap("openshift-kube-controller-manager", "controller-manager-kubeconfig"), + libraryoutputresources.ExactConfigMap("openshift-kube-controller-manager", "kube-controller-cert-syncer-kubeconfig"), + 
libraryoutputresources.ExactConfigMap("openshift-kube-controller-manager", "serviceaccount-ca"), + libraryoutputresources.ExactConfigMap("openshift-kube-controller-manager", "service-ca"), + libraryoutputresources.ExactConfigMap("openshift-kube-controller-manager", "recycler-config"), + libraryoutputresources.ExactConfigMap("openshift-kube-controller-manager", "trusted-ca-bundle"), + libraryoutputresources.ExactConfigMap("openshift-kube-controller-manager", "aggregator-client-ca"), + libraryoutputresources.ExactConfigMap("openshift-kube-controller-manager", "client-ca"), + + // Secrets + libraryoutputresources.ExactSecret("openshift-kube-controller-manager", "service-account-private-key"), + libraryoutputresources.ExactSecret("openshift-kube-controller-manager", "serving-cert"), + libraryoutputresources.ExactSecret("openshift-kube-controller-manager", "localhost-recovery-client-token"), + libraryoutputresources.ExactSecret("openshift-kube-controller-manager", "kube-controller-manager-client-cert-key"), + libraryoutputresources.ExactSecret("openshift-kube-controller-manager", "csr-signer"), + + // Roles and RoleBindings in target namespace + libraryoutputresources.ExactRole("kube-system", "system:openshift:controller:cluster-policy-controller"), + libraryoutputresources.ExactRoleBinding("kube-system", "system:openshift:controller:cluster-policy-controller"), + + // PodDisruptionBudget + libraryoutputresources.ExactPDB("openshift-kube-controller-manager-operator", "kube-controller-manager-operator"), + }, + EventingNamespaces: []string{ + "openshift-kube-controller-manager", + "openshift-kube-controller-manager-operator", + }, }, UserWorkloadResources: libraryoutputresources.ResourceList{ - ExactResources: []libraryoutputresources.ExactResourceID{}, + ExactResources: []libraryoutputresources.ExactResourceID{ + // CSR-related resources + libraryoutputresources.ExactClusterRole("system:openshift:controller:cluster-csr-approver"), + 
libraryoutputresources.ExactClusterRoleBinding("system:openshift:controller:cluster-csr-approver"), + + // Namespace security allocation controller + libraryoutputresources.ExactClusterRole("system:openshift:controller:namespace-security-allocation-controller"), + libraryoutputresources.ExactClusterRoleBinding("system:openshift:controller:namespace-security-allocation-controller"), + + // PodSecurity admission label syncer controller + libraryoutputresources.ExactClusterRole("system:openshift:controller:podsecurity-admission-label-syncer-controller"), + libraryoutputresources.ExactClusterRoleBinding("system:openshift:controller:podsecurity-admission-label-syncer-controller"), + + // PodSecurity admission label privileged namespaces syncer controller + libraryoutputresources.ExactClusterRole("system:openshift:controller:podsecurity-admission-label-privileged-namespaces-syncer-controller"), + libraryoutputresources.ExactClusterRoleBinding("system:openshift:controller:podsecurity-admission-label-privileged-namespaces-syncer-controller"), + + // Localhost recovery + libraryoutputresources.ExactClusterRoleBinding("system:openshift:operator:kube-controller-manager-recovery"), + + // Operator RBAC + libraryoutputresources.ExactClusterRoleBinding("system:openshift:operator:kube-controller-manager-operator"), + }, }, }, nil } From 5cb80d24c620b23029477d8f702fba2970a5d3bf Mon Sep 17 00:00:00 2001 From: Fabio Bertinatto Date: Fri, 28 Nov 2025 15:10:39 -0300 Subject: [PATCH 6/8] Vendor libraryapplyconfiguration Generated with Claude Code --- .../pkg/flagtypes/time.go | 53 +++ .../libraryapplyconfiguration/README.md | 4 + .../apply_configuration.go | 108 +++++ .../client_mutations.go | 177 +++++++++ .../libraryapplyconfiguration/command.go | 145 +++++++ .../directory_apply_configuration.go | 181 +++++++++ .../directory_mutations.go | 74 ++++ .../libraryapplyconfiguration/equivalence.go | 239 +++++++++++ .../operator_launch_helpers.go | 286 ++++++++++++++ 
.../libraryapplyconfiguration/options.go | 83 ++++ .../libraryapplyconfiguration/resources.go | 9 + .../libraryapplyconfiguration/types.go | 54 +++ .../libraryapplyconfiguration/write.go | 1 + .../k8s.io/utils/clock/testing/fake_clock.go | 374 ++++++++++++++++++ .../clock/testing/simple_interval_clock.go | 44 +++ vendor/modules.txt | 3 + 16 files changed, 1835 insertions(+) create mode 100644 vendor/github.com/openshift/multi-operator-manager/pkg/flagtypes/time.go create mode 100644 vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/README.md create mode 100644 vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/apply_configuration.go create mode 100644 vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/client_mutations.go create mode 100644 vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/command.go create mode 100644 vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/directory_apply_configuration.go create mode 100644 vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/directory_mutations.go create mode 100644 vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/equivalence.go create mode 100644 vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/operator_launch_helpers.go create mode 100644 vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/options.go create mode 100644 vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/resources.go create mode 100644 vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/types.go create mode 100644 vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/write.go create mode 100644 
vendor/k8s.io/utils/clock/testing/fake_clock.go create mode 100644 vendor/k8s.io/utils/clock/testing/simple_interval_clock.go diff --git a/vendor/github.com/openshift/multi-operator-manager/pkg/flagtypes/time.go b/vendor/github.com/openshift/multi-operator-manager/pkg/flagtypes/time.go new file mode 100644 index 000000000..d002e0756 --- /dev/null +++ b/vendor/github.com/openshift/multi-operator-manager/pkg/flagtypes/time.go @@ -0,0 +1,53 @@ +// pulled from https://github.com/spf13/pflag/pull/348 + +package flagtypes + +import ( + "fmt" + "strings" + "time" +) + +// TimeValue adapts time.Time for use as a flag. +type TimeValue struct { + *time.Time + formats []string +} + +func NewTimeValue(val time.Time, p *time.Time, formats []string) *TimeValue { + *p = val + return &TimeValue{ + Time: p, + formats: formats, + } +} + +// Set time.Time value from string based on accepted formats. +func (d *TimeValue) Set(s string) error { + s = strings.TrimSpace(s) + for _, f := range d.formats { + v, err := time.Parse(f, s) + if err != nil { + continue + } + *d.Time = v + return nil + } + + formatsString := "" + for i, f := range d.formats { + if i > 0 { + formatsString += ", " + } + formatsString += fmt.Sprintf("`%s`", f) + } + + return fmt.Errorf("invalid time format `%s` must be one of: %s", s, formatsString) +} + +// Type name for time.Time flags. 
+func (d *TimeValue) Type() string { + return "time" +} + +func (d *TimeValue) String() string { return d.Time.Format(time.RFC3339Nano) } diff --git a/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/README.md b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/README.md new file mode 100644 index 000000000..903f0fed0 --- /dev/null +++ b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/README.md @@ -0,0 +1,4 @@ +This package is something that is useful for building testing methodology will move to library-go eventually. + +Start by calling `func NewSampleOperatorApplyConfigurationCommand(applyConfigurationFn ApplyConfigurationFunc, streams genericiooptions.IOStreams) *cobra.Command {` +and adding that command as your `apply-configuration` command. \ No newline at end of file diff --git a/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/apply_configuration.go b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/apply_configuration.go new file mode 100644 index 000000000..4c88330c6 --- /dev/null +++ b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/apply_configuration.go @@ -0,0 +1,108 @@ +package libraryapplyconfiguration + +import ( + "errors" + "fmt" + "github.com/openshift/multi-operator-manager/pkg/library/libraryoutputresources" + "path/filepath" + "strings" + + "github.com/openshift/library-go/pkg/manifestclient" + "k8s.io/apimachinery/pkg/util/sets" +) + +// MutationActionReader provides access to serialized mutation requests +type MutationActionReader interface { + ListActions() []manifestclient.Action + RequestsForAction(action manifestclient.Action) []manifestclient.SerializedRequestish + AllRequests() []manifestclient.SerializedRequestish +} + +// SingleClusterDesiredMutationGetter provides access to mutations targeted at 
a single type of cluster +type SingleClusterDesiredMutationGetter interface { + GetClusterType() ClusterType + Requests() MutationActionReader +} + +// AllDesiredMutationsGetter provides access to mutations targeted at all available types of clusters +type AllDesiredMutationsGetter interface { + MutationsForClusterType(clusterType ClusterType) SingleClusterDesiredMutationGetter +} + +type applyConfiguration struct { + desiredMutationsByClusterType map[ClusterType]SingleClusterDesiredMutationGetter +} + +var ( + _ AllDesiredMutationsGetter = &applyConfiguration{} +) + +func UnspecifiedOutputResources(allDesiredMutationsGetter AllDesiredMutationsGetter, allAllowedOutputResources *libraryoutputresources.OutputResources) []manifestclient.SerializedRequestish { + allMutationRequests := []manifestclient.SerializedRequestish{} + for _, clusterType := range sets.List(AllClusterTypes) { + desiredMutationsGetter := allDesiredMutationsGetter.MutationsForClusterType(clusterType) + if desiredMutationsGetter != nil { + allMutationRequests = append(allMutationRequests, desiredMutationsGetter.Requests().AllRequests()...) + } + } + + combinedList := &libraryoutputresources.ResourceList{} + combinedList.ExactResources = append(combinedList.ExactResources, allAllowedOutputResources.ConfigurationResources.ExactResources...) + combinedList.ExactResources = append(combinedList.ExactResources, allAllowedOutputResources.ManagementResources.ExactResources...) + combinedList.ExactResources = append(combinedList.ExactResources, allAllowedOutputResources.UserWorkloadResources.ExactResources...) + combinedList.GeneratedNameResources = append(combinedList.GeneratedNameResources, allAllowedOutputResources.ConfigurationResources.GeneratedNameResources...) + combinedList.GeneratedNameResources = append(combinedList.GeneratedNameResources, allAllowedOutputResources.ManagementResources.GeneratedNameResources...) 
+ combinedList.GeneratedNameResources = append(combinedList.GeneratedNameResources, allAllowedOutputResources.UserWorkloadResources.GeneratedNameResources...) + combinedList.EventingNamespaces = append(combinedList.EventingNamespaces, allAllowedOutputResources.ConfigurationResources.EventingNamespaces...) + combinedList.EventingNamespaces = append(combinedList.EventingNamespaces, allAllowedOutputResources.ManagementResources.EventingNamespaces...) + combinedList.EventingNamespaces = append(combinedList.EventingNamespaces, allAllowedOutputResources.UserWorkloadResources.EventingNamespaces...) + filteredMutationRequests := FilterSerializedRequests(allMutationRequests, combinedList) + + return manifestclient.DifferenceOfSerializedRequests(allMutationRequests, filteredMutationRequests) +} + +func ValidateAllDesiredMutationsGetter(allDesiredMutationsGetter AllDesiredMutationsGetter, allAllowedOutputResources *libraryoutputresources.OutputResources) error { + errs := []error{} + + if allDesiredMutationsGetter == nil { + return fmt.Errorf("applyConfiguration is required") + } + + unspecifiedOutputResources := UnspecifiedOutputResources(allDesiredMutationsGetter, allAllowedOutputResources) + if len(unspecifiedOutputResources) > 0 { + unspecifiedOutputIdentifiers := []string{} + for _, curr := range unspecifiedOutputResources { + unspecifiedOutputIdentifiers = append(unspecifiedOutputIdentifiers, curr.GetSerializedRequest().StringID()) + } + errs = append(errs, fmt.Errorf("%d output-resource were produced, but not present in the specified output: %v", len(unspecifiedOutputIdentifiers), strings.Join(unspecifiedOutputIdentifiers, ", "))) + } + + return errors.Join(errs...) 
+} + +func WriteApplyConfiguration(desiredApplyConfiguration AllDesiredMutationsGetter, outputDirectory string) error { + errs := []error{} + + for _, clusterType := range sets.List(AllClusterTypes) { + desiredMutations := desiredApplyConfiguration.MutationsForClusterType(clusterType) + err := manifestclient.WriteMutationDirectory(filepath.Join(outputDirectory, string(clusterType)), desiredMutations.Requests().AllRequests()...) + if err != nil { + errs = append(errs, fmt.Errorf("failed writing requests for %q: %w", clusterType, err)) + } + } + + return errors.Join(errs...) +} + +func (s *applyConfiguration) MutationsForClusterType(clusterType ClusterType) SingleClusterDesiredMutationGetter { + return s.desiredMutationsByClusterType[clusterType] +} + +type ClusterType string + +var ( + ClusterTypeConfiguration ClusterType = "Configuration" + ClusterTypeManagement ClusterType = "Management" + ClusterTypeUserWorkload ClusterType = "UserWorkload" + AllClusterTypes = sets.New(ClusterTypeConfiguration, ClusterTypeManagement, ClusterTypeUserWorkload) +) diff --git a/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/client_mutations.go b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/client_mutations.go new file mode 100644 index 000000000..20ca35c0a --- /dev/null +++ b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/client_mutations.go @@ -0,0 +1,177 @@ +package libraryapplyconfiguration + +import ( + "fmt" + + "github.com/openshift/library-go/pkg/manifestclient" + "github.com/openshift/multi-operator-manager/pkg/library/libraryoutputresources" +) + +type clientBasedClusterApplyResult struct { + clusterType ClusterType + + mutationTracker *manifestclient.AllActionsTracker[manifestclient.TrackedSerializedRequest] +} + +var ( + _ SingleClusterDesiredMutationGetter = &clientBasedClusterApplyResult{} +) + +func (s *clientBasedClusterApplyResult) 
GetClusterType() ClusterType { + return s.clusterType +} + +func (s *clientBasedClusterApplyResult) Requests() MutationActionReader { + return s.mutationTracker +} + +func NewApplyConfigurationFromClient( + mutationTracker *manifestclient.AllActionsTracker[manifestclient.TrackedSerializedRequest], +) *applyConfiguration { + ret := &applyConfiguration{ + desiredMutationsByClusterType: map[ClusterType]SingleClusterDesiredMutationGetter{}, + } + for clusterType := range AllClusterTypes { + ret.desiredMutationsByClusterType[clusterType] = &clientBasedClusterApplyResult{ + clusterType: clusterType, + mutationTracker: mutationTracker, + } + } + + return ret +} + +func FilterAllDesiredMutationsGetter( + in AllDesiredMutationsGetter, + allAllowedOutputResources *libraryoutputresources.OutputResources, +) AllDesiredMutationsGetter { + ret := &applyConfiguration{ + desiredMutationsByClusterType: map[ClusterType]SingleClusterDesiredMutationGetter{}, + } + + for clusterType := range AllClusterTypes { + var clusterTypeFilter *libraryoutputresources.ResourceList + if allAllowedOutputResources != nil { + switch clusterType { + case ClusterTypeConfiguration: + clusterTypeFilter = &allAllowedOutputResources.ConfigurationResources + case ClusterTypeManagement: + clusterTypeFilter = &allAllowedOutputResources.ManagementResources + case ClusterTypeUserWorkload: + clusterTypeFilter = &allAllowedOutputResources.UserWorkloadResources + default: + panic(fmt.Sprintf("coding error: %q", clusterType)) + } + } + + ret.desiredMutationsByClusterType[clusterType] = &filteringSingleClusterDesiredMutationGetter{ + delegate: in.MutationsForClusterType(clusterType), + resourceList: clusterTypeFilter, + } + } + + return ret +} + +type filteringSingleClusterDesiredMutationGetter struct { + delegate SingleClusterDesiredMutationGetter + resourceList *libraryoutputresources.ResourceList +} + +func (f filteringSingleClusterDesiredMutationGetter) GetClusterType() ClusterType { + return 
f.delegate.GetClusterType() +} + +func (f filteringSingleClusterDesiredMutationGetter) Requests() MutationActionReader { + return &filteringMutationActionReader{ + delegate: f.delegate.Requests(), + resourceList: f.resourceList, + } +} + +var ( + _ SingleClusterDesiredMutationGetter = filteringSingleClusterDesiredMutationGetter{} + _ MutationActionReader = &filteringMutationActionReader{} +) + +type filteringMutationActionReader struct { + delegate MutationActionReader + resourceList *libraryoutputresources.ResourceList +} + +func (f filteringMutationActionReader) ListActions() []manifestclient.Action { + return f.delegate.ListActions() +} + +func (f filteringMutationActionReader) RequestsForAction(action manifestclient.Action) []manifestclient.SerializedRequestish { + return FilterSerializedRequests(f.delegate.RequestsForAction(action), f.resourceList) +} + +func (f filteringMutationActionReader) AllRequests() []manifestclient.SerializedRequestish { + return FilterSerializedRequests(f.delegate.AllRequests(), f.resourceList) +} + +func RemoveEvents(requests []manifestclient.SerializedRequestish) []manifestclient.SerializedRequestish { + filteredRequests := []manifestclient.SerializedRequestish{} + + for _, curr := range requests { + metadata := curr.GetSerializedRequest().GetLookupMetadata() + if isEventResource(metadata.ResourceType.GroupResource()) { + continue + } + filteredRequests = append(filteredRequests, curr) + } + return filteredRequests +} + +func FilterSerializedRequests(requests []manifestclient.SerializedRequestish, allowedResources *libraryoutputresources.ResourceList) []manifestclient.SerializedRequestish { + filteredRequests := []manifestclient.SerializedRequestish{} + + for _, curr := range requests { + metadata := curr.GetSerializedRequest().GetLookupMetadata() + if metadataMatchesFilter(metadata, allowedResources) { + filteredRequests = append(filteredRequests, curr) + } + } + return filteredRequests +} + +func metadataMatchesFilter(metadata 
manifestclient.ActionMetadata, allowedResources *libraryoutputresources.ResourceList) bool { + if allowedResources == nil { + return true + } + + gr := metadata.ResourceType.GroupResource() + if isEventResource(gr) { + for _, curr := range allowedResources.EventingNamespaces { + if metadata.Namespace == curr { + return true + } + } + } + + for _, curr := range allowedResources.ExactResources { + if len(metadata.GenerateName) > 0 { + continue + } + if metadata.ResourceType.Group == curr.Group && + metadata.ResourceType.Resource == curr.Resource && + metadata.Namespace == curr.Namespace && + metadata.Name == curr.Name { + return true + } + } + for _, curr := range allowedResources.GeneratedNameResources { + if len(metadata.Name) > 0 { + continue + } + if metadata.ResourceType.Group == curr.Group && + metadata.ResourceType.Resource == curr.Resource && + metadata.Namespace == curr.Namespace && + metadata.GenerateName == curr.GeneratedName { + return true + } + } + + return false +} diff --git a/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/command.go b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/command.go new file mode 100644 index 000000000..5dedf744f --- /dev/null +++ b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/command.go @@ -0,0 +1,145 @@ +package libraryapplyconfiguration + +import ( + "context" + "fmt" + "math/rand" + "time" + + "github.com/openshift/library-go/pkg/manifestclient" + "github.com/openshift/multi-operator-manager/pkg/flagtypes" + "github.com/openshift/multi-operator-manager/pkg/library/libraryoutputresources" + "github.com/spf13/cobra" + "github.com/spf13/pflag" + "k8s.io/cli-runtime/pkg/genericiooptions" + "k8s.io/utils/clock" + clocktesting "k8s.io/utils/clock/testing" +) + +// ApplyConfigurationInput is provided to the ApplyConfigurationFunc +type ApplyConfigurationInput struct { + // MutationTrackingClient is 
offered as an alternative to the inputDirectory to make it easier to provide mocks to code. + // This forces all downstream code to rely on the client reading aspects and not grow an odd dependency to disk. + MutationTrackingClient manifestclient.MutationTrackingClient + + // Now is the declared time that this function was called at. It doesn't necessarily bear any relationship to + // the actual time. This is another aspect that makes unit and integration testing easier. + Clock clock.Clock + + // Streams is for I/O. The StdIn will usually be nil'd out. + Streams genericiooptions.IOStreams + + // Controllers holds a list of controller names to run. + Controllers []string +} + +// ApplyConfigurationFunc is a function called for applying configuration. +type ApplyConfigurationFunc func(ctx context.Context, applyConfigurationInput ApplyConfigurationInput) (*ApplyConfigurationRunResult, AllDesiredMutationsGetter, error) + +func NewApplyConfigurationCommand(applyConfigurationFn ApplyConfigurationFunc, outputResourcesFn libraryoutputresources.OutputResourcesFunc, streams genericiooptions.IOStreams) *cobra.Command { + return newApplyConfigurationCommand(applyConfigurationFn, outputResourcesFn, streams) +} + +type applyConfigurationFlags struct { + applyConfigurationFn ApplyConfigurationFunc + outputResourcesFn libraryoutputresources.OutputResourcesFunc + + // InputDirectory is a directory that contains the must-gather formatted inputs + inputDirectory string + + // OutputDirectory is the directory to where output should be stored + outputDirectory string + + // controllers hold an optional list of controller names to run. 
+ // '*' means "all controllers are enabled by default" + // 'foo' means "enable 'foo'" + // '-foo' means "disable 'foo'" + controllers []string + + now time.Time + + streams genericiooptions.IOStreams +} + +func newApplyConfigurationFlags(streams genericiooptions.IOStreams, applyConfigurationFn ApplyConfigurationFunc, outputResourcesFn libraryoutputresources.OutputResourcesFunc) *applyConfigurationFlags { + return &applyConfigurationFlags{ + applyConfigurationFn: applyConfigurationFn, + outputResourcesFn: outputResourcesFn, + now: time.Now(), + streams: streams, + } +} + +func newApplyConfigurationCommand(applyConfigurationFn ApplyConfigurationFunc, outputResourcesFn libraryoutputresources.OutputResourcesFunc, streams genericiooptions.IOStreams) *cobra.Command { + f := newApplyConfigurationFlags(streams, applyConfigurationFn, outputResourcesFn) + + cmd := &cobra.Command{ + Use: "apply-configuration", + Short: "Operator apply-configuration command.", + + SilenceUsage: true, + SilenceErrors: true, + RunE: func(cmd *cobra.Command, args []string) error { + fmt.Fprintf(f.streams.ErrOut, "TODO output version\n") + fmt.Fprintf(f.streams.Out, "TODO output version\n") + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + if err := f.Validate(); err != nil { + return err + } + o, err := f.ToOptions(ctx) + if err != nil { + return err + } + rand.New(rand.NewSource(o.input.Clock.Now().UTC().UnixNano())) + if err := o.Run(ctx); err != nil { + return err + } + return nil + }, + } + + f.BindFlags(cmd.Flags()) + + return cmd +} + +func (f *applyConfigurationFlags) BindFlags(flags *pflag.FlagSet) { + flags.StringVar(&f.inputDirectory, "input-dir", f.inputDirectory, "The directory where the resource input is stored.") + flags.StringVar(&f.outputDirectory, "output-dir", f.outputDirectory, "The directory where the output is stored.") + flags.StringSliceVar(&f.controllers, "controllers", []string{"*"}, "A list of controllers to enable. 
'*' enables all controllers, 'foo' enables the controller named 'foo', '-foo' disables the controller named 'foo'. Default: `*`") + nowFlag := flagtypes.NewTimeValue(f.now, &f.now, []string{time.RFC3339}) + flags.Var(nowFlag, "now", "The time to use time.Now during this execution.") +} + +func (f *applyConfigurationFlags) Validate() error { + if len(f.inputDirectory) == 0 { + return fmt.Errorf("--input-dir is required") + } + if len(f.outputDirectory) == 0 { + return fmt.Errorf("--output-dir is required") + } + if f.now.IsZero() { + return fmt.Errorf("--now is required") + } + return nil +} + +func (f *applyConfigurationFlags) ToOptions(ctx context.Context) (*applyConfigurationOptions, error) { + momClient := manifestclient.NewHTTPClient(f.inputDirectory) + input := ApplyConfigurationInput{ + MutationTrackingClient: momClient, + Clock: clocktesting.NewFakeClock(f.now), + Controllers: f.controllers, + Streams: f.streams, + } + + return newApplyConfigurationOptions( + f.applyConfigurationFn, + f.outputResourcesFn, + input, + f.outputDirectory, + ), + nil +} diff --git a/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/directory_apply_configuration.go b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/directory_apply_configuration.go new file mode 100644 index 000000000..72378eea5 --- /dev/null +++ b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/directory_apply_configuration.go @@ -0,0 +1,181 @@ +package libraryapplyconfiguration + +import ( + "errors" + "fmt" + "io/fs" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/yaml" + "os" + "path/filepath" +) + +type ApplyConfigurationResult interface { + Error() error + OutputDirectory() (string, error) + Stdout() string + Stderr() string + ControllerResults() *ApplyConfigurationRunResult + + AllDesiredMutationsGetter +} + +type 
simpleApplyConfigurationResult struct { + err error + outputDirectory string + stdout string + stderr string + controllerResults *ApplyConfigurationRunResult + + applyConfiguration *applyConfiguration +} + +var ( + _ AllDesiredMutationsGetter = &simpleApplyConfigurationResult{} + _ ApplyConfigurationResult = &simpleApplyConfigurationResult{} +) + +func NewApplyConfigurationResultFromDirectory(inFS fs.FS, outputDirectory string, execError error) (ApplyConfigurationResult, error) { + errs := []error{} + var err error + + stdoutContent := []byte{} + stdoutLocation := filepath.Join(outputDirectory, "stdout.log") + stdoutContent, err = fs.ReadFile(inFS, "stdout.log") + if err != nil && !os.IsNotExist(err) { + errs = append(errs, fmt.Errorf("failed reading %q: %w", stdoutLocation, err)) + } + // TODO stream through and preserve first and last to avoid memory explosion + if len(stdoutContent) > 512*1024 { + indexToStart := len(stdoutContent) - (512 * 1024) + stdoutContent = stdoutContent[indexToStart:] + } + + stderrContent := []byte{} + stderrLocation := filepath.Join(outputDirectory, "stderr.log") + stderrContent, err = fs.ReadFile(inFS, "stderr.log") + if err != nil && !os.IsNotExist(err) { + errs = append(errs, fmt.Errorf("failed reading %q: %w", stderrLocation, err)) + } + // TODO stream through and preserve first and last to avoid memory explosion + if len(stderrContent) > 512*1024 { + indexToStart := len(stderrContent) - (512 * 1024) + stderrContent = stderrContent[indexToStart:] + } + + var controllerResults *ApplyConfigurationRunResult + controllerResultsContent := []byte{} + controllerResultsLocation := filepath.Join(outputDirectory, "controller-results.yaml") + controllerResultsContent, err = fs.ReadFile(inFS, "controller-results.yaml") + if err != nil && !os.IsNotExist(err) { + errs = append(errs, fmt.Errorf("failed reading %q: %w", controllerResultsLocation, err)) + } + if len(controllerResultsContent) > 0 { + if asJSON, err := 
yaml.ToJSON(controllerResultsContent); err != nil { + errs = append(errs, fmt.Errorf("unable to convert controller-results.yaml to json: %w", err)) + } else { + localControllerResults := &ApplyConfigurationRunResult{} + if err := json.Unmarshal(asJSON, localControllerResults); err != nil { + errs = append(errs, fmt.Errorf("unable to parse controller-results.yaml: %w", err)) + } else { + controllerResults = localControllerResults + } + } + } + + outputContent, err := fs.ReadDir(inFS, ".") + switch { + case errors.Is(err, fs.ErrNotExist) && execError != nil: + return &simpleApplyConfigurationResult{ + stdout: string(stdoutContent), + stderr: string(stderrContent), + outputDirectory: outputDirectory, + + applyConfiguration: &applyConfiguration{}, + }, execError + + case errors.Is(err, fs.ErrNotExist) && execError == nil: + return nil, fmt.Errorf("unable to read output-dir content %q: %w", outputDirectory, err) + + case err != nil: + return nil, fmt.Errorf("unable to read output-dir content %q: %w", outputDirectory, err) + } + + // at this point we either + // 1. had an execError and we were able to read the directory + // 2. 
did not have an execError we were able to read the directory + + ret := &simpleApplyConfigurationResult{ + stdout: string(stdoutContent), + stderr: string(stderrContent), + controllerResults: controllerResults, + outputDirectory: outputDirectory, + applyConfiguration: &applyConfiguration{}, + } + ret.applyConfiguration, err = newApplyConfigurationFromDirectory(inFS, outputDirectory) + if err != nil { + errs = append(errs, fmt.Errorf("failure building applyConfiguration result: %w", err)) + } + + // check to be sure we don't have any extra content + for _, currContent := range outputContent { + if currContent.Name() == "stdout.log" { + continue + } + if currContent.Name() == "stderr.log" { + continue + } + if currContent.Name() == "controller-results.yaml" { + continue + } + + if !currContent.IsDir() { + errs = append(errs, fmt.Errorf("unexpected file %q, only target cluster directories are: %v", filepath.Join(outputDirectory, currContent.Name()), sets.List(AllClusterTypes))) + continue + } + if !AllClusterTypes.Has(ClusterType(currContent.Name())) { + errs = append(errs, fmt.Errorf("unexpected file %q, only target cluster directories are: %v", filepath.Join(outputDirectory, currContent.Name()), sets.List(AllClusterTypes))) + continue + } + } + + // if we had an exec error, be sure we add it to the list of failures. + if len(errs) == 0 && execError != nil { + return ret, execError + } + if len(errs) > 0 && execError != nil { + errs = append(errs, execError) + } + + ret.err = errors.Join(errs...) 
+ if ret.err != nil { + // TODO may decide to disallow returning any info later + return ret, ret.err + } + return ret, nil +} + +func (s *simpleApplyConfigurationResult) Stdout() string { + return s.stdout +} + +func (s *simpleApplyConfigurationResult) Stderr() string { + return s.stderr +} + +func (s *simpleApplyConfigurationResult) Error() error { + return s.err +} + +func (s *simpleApplyConfigurationResult) ControllerResults() *ApplyConfigurationRunResult { + return s.controllerResults +} + +func (s *simpleApplyConfigurationResult) OutputDirectory() (string, error) { + return s.outputDirectory, nil +} + +func (s *simpleApplyConfigurationResult) MutationsForClusterType(clusterType ClusterType) SingleClusterDesiredMutationGetter { + return s.applyConfiguration.MutationsForClusterType(clusterType) +} diff --git a/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/directory_mutations.go b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/directory_mutations.go new file mode 100644 index 000000000..ad3e2f133 --- /dev/null +++ b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/directory_mutations.go @@ -0,0 +1,74 @@ +package libraryapplyconfiguration + +import ( + "errors" + "fmt" + "github.com/openshift/library-go/pkg/manifestclient" + "io/fs" + "path/filepath" +) + +type directoryBasedClusterApplyResult struct { + clusterType ClusterType + + // outputDirectory is useful for debugging where content comes from + outputDirectory string + + allRequests *manifestclient.AllActionsTracker[manifestclient.FileOriginatedSerializedRequest] +} + +var ( + _ SingleClusterDesiredMutationGetter = &directoryBasedClusterApplyResult{} +) + +func (s *directoryBasedClusterApplyResult) GetClusterType() ClusterType { + return s.clusterType +} + +func (s *directoryBasedClusterApplyResult) Requests() MutationActionReader { + return s.allRequests +} + +// 
newApplyConfigurationFromDirectory takes a standard output directory, selects the subdirectory for the clusterType, and consumes the +// content inside that directory. +// All files can be either json or yaml. +func newApplyConfigurationFromDirectory(inFS fs.FS, outputDirectory string) (*applyConfiguration, error) { + ret := &applyConfiguration{ + desiredMutationsByClusterType: map[ClusterType]SingleClusterDesiredMutationGetter{}, + } + + errs := []error{} + var err error + for clusterType := range AllClusterTypes { + ret.desiredMutationsByClusterType[clusterType], err = newApplyResultFromDirectory(clusterType, inFS, outputDirectory) + if err != nil { + errs = append(errs, fmt.Errorf("failure building %q result: %w", clusterType, err)) + } + } + if len(errs) > 0 { + return nil, errors.Join(errs...) + } + + return ret, nil +} + +func newApplyResultFromDirectory(clusterType ClusterType, inFS fs.FS, outputDirectory string) (*directoryBasedClusterApplyResult, error) { + clusterTypeDirName := filepath.Join(outputDirectory, string(clusterType)) + clusterTypeDir, err := fs.Sub(inFS, string(clusterType)) + if err != nil { + return nil, fmt.Errorf("unable to get subDir: %w", err) + } + + mutationRequests, err := manifestclient.ReadEmbeddedMutationDirectory(clusterTypeDir) + if err != nil { + return nil, fmt.Errorf("unable to read actions for clusterType=%q in %q: %w", clusterType, clusterTypeDirName, err) + } + + ret := &directoryBasedClusterApplyResult{ + clusterType: clusterType, + outputDirectory: outputDirectory, + allRequests: mutationRequests, + } + + return ret, nil +} diff --git a/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/equivalence.go b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/equivalence.go new file mode 100644 index 000000000..1a670776b --- /dev/null +++ b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/equivalence.go @@ -0,0 
+1,239 @@ +package libraryapplyconfiguration + +import ( + "bytes" + "fmt" + "reflect" + + "github.com/google/go-cmp/cmp" + "github.com/openshift/library-go/pkg/manifestclient" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/yaml" +) + +func EquivalentApplyConfigurationResultIgnoringEvents(lhs, rhs ApplyConfigurationResult) []string { + reasons := []string{} + reasons = append(reasons, equivalentErrors("Error", lhs.Error(), rhs.Error())...) + reasons = append(reasons, equivalentRunResults("ControllerResults", lhs.ControllerResults(), rhs.ControllerResults())...) + + for _, clusterType := range sets.List(AllClusterTypes) { + currLHS := lhs.MutationsForClusterType(clusterType) + currRHS := rhs.MutationsForClusterType(clusterType) + reasons = append(reasons, EquivalentClusterApplyResultIgnoringEvents(string(clusterType), currLHS, currRHS)...) 
+ } + + return reasons +} + +func equivalentErrors(field string, lhs, rhs error) []string { + reasons := []string{} + switch { + case lhs == nil && rhs == nil: + case lhs == nil && rhs != nil: + reasons = append(reasons, fmt.Sprintf("%v: lhs=nil, rhs=%v", field, rhs)) + case lhs != nil && rhs == nil: + reasons = append(reasons, fmt.Sprintf("%v: lhs=%v, rhs=nil", field, lhs)) + case lhs.Error() != rhs.Error(): + reasons = append(reasons, fmt.Sprintf("%v: lhs=%v, rhs=%v", field, lhs, rhs)) + } + + return reasons +} + +func equivalentRunResults(field string, lhs, rhs *ApplyConfigurationRunResult) []string { + reasons := []string{} + switch { + case lhs == nil && rhs == nil: + case lhs == nil && rhs != nil: + reasons = append(reasons, fmt.Sprintf("%v: lhs=nil, rhs=%v", field, rhs)) + case lhs != nil && rhs == nil: + reasons = append(reasons, fmt.Sprintf("%v: lhs=%v, rhs=nil", field, lhs)) + default: + if !reflect.DeepEqual(lhs, rhs) { + reasons = append(reasons, fmt.Sprintf("%v: diff: %v", field, cmp.Diff(lhs, rhs))) + } + } + + return reasons +} + +func EquivalentClusterApplyResultIgnoringEvents(field string, lhs, rhs SingleClusterDesiredMutationGetter) []string { + switch { + case lhs == nil && rhs == nil: + return nil + case lhs == nil && rhs != nil: + return []string{fmt.Sprintf("%v: lhs=nil, len(rhs)=%v", field, len(rhs.Requests().AllRequests()))} + case lhs != nil && rhs == nil: + return []string{fmt.Sprintf("%v: len(lhs)=%v, rhs=nil", field, len(lhs.Requests().AllRequests()))} + case lhs != nil && rhs != nil: + // check the rest + } + + lhsAllRequests := RemoveEvents(lhs.Requests().AllRequests()) + rhsAllRequests := RemoveEvents(rhs.Requests().AllRequests()) + + // TODO different method with prettier message + equivalent, missingInRHS, missingInLHS := manifestclient.AreAllSerializedRequestsEquivalentWithReasons(lhsAllRequests, rhsAllRequests) + if equivalent { + return nil + } + + reasons := []string{} + reasons = append(reasons, reasonForDiff("rhs", 
missingInRHS, rhsAllRequests)...) + + uniquelyMissingInLHS := []manifestclient.SerializedRequest{} + for _, currMissingInLHS := range missingInLHS { + lhsMetadata := expandedMetadataFor(currMissingInLHS.GetSerializedRequest()) + found := false + for _, currMissingInRHS := range missingInRHS { + rhsMetadata := expandedMetadataFor(currMissingInRHS.GetSerializedRequest()) + if lhsMetadata == rhsMetadata { + found = true + break + } + } + if !found { + uniquelyMissingInLHS = append(uniquelyMissingInLHS, currMissingInLHS) + } + } + reasons = append(reasons, reasonForDiff("lhs", uniquelyMissingInLHS, lhsAllRequests)...) + + qualifiedReasons := []string{} + for _, curr := range reasons { + qualifiedReasons = append(qualifiedReasons, fmt.Sprintf("%s: %s", field, curr)) + } + return qualifiedReasons +} + +// expandedMetadata is useful for describing diffs, potentially to get pushed into manifestclient +type expandedMetadata struct { + metadata manifestclient.ActionMetadata + fieldManager string + controllerInstanceName string +} + +func expandedMetadataFor(serializedRequest *manifestclient.SerializedRequest) expandedMetadata { + if serializedRequest == nil { + return expandedMetadata{} + } + metadata := serializedRequest.GetLookupMetadata() + fieldManager := "" + controllerInstanceName := "" + + isApply := serializedRequest.Action == manifestclient.ActionApply || serializedRequest.Action == manifestclient.ActionApplyStatus + if isApply { + lhsOptions := &metav1.ApplyOptions{} + if err := yaml.Unmarshal(serializedRequest.Options, lhsOptions); err == nil { + // ignore err. if it doesn't work we get the zero value and that's ok + fieldManager = lhsOptions.FieldManager + } + } + + bodyObj := &unstructured.Unstructured{ + Object: map[string]interface{}{}, + } + if err := yaml.Unmarshal(serializedRequest.Body, &bodyObj.Object); err == nil { + // ignore err. 
if it doesn't work we get the zero value and that's ok + if bodyObj != nil && len(bodyObj.GetAnnotations()["synthetic.mom.openshift.io/controller-instance-name"]) > 0 { + controllerInstanceName = bodyObj.GetAnnotations()["synthetic.mom.openshift.io/controller-instance-name"] + } + if bodyObj != nil && len(bodyObj.GetAnnotations()["operator.openshift.io/controller-instance-name"]) > 0 { + controllerInstanceName = bodyObj.GetAnnotations()["operator.openshift.io/controller-instance-name"] + } + } + + return expandedMetadata{ + metadata: metadata, + fieldManager: fieldManager, + controllerInstanceName: controllerInstanceName, + } +} + +func reasonForDiff(nameOfDestination string, sourceRequestsToCheck []manifestclient.SerializedRequest, allDestinationRequests []manifestclient.SerializedRequestish) []string { + reasons := []string{} + + for _, currSourceRequest := range sourceRequestsToCheck { + currDestinationRequests := manifestclient.RequestsForResource(allDestinationRequests, currSourceRequest.GetLookupMetadata()) + + if len(currDestinationRequests) == 0 { + reasons = append(reasons, fmt.Sprintf("%s is missing: %v", nameOfDestination, currSourceRequest.StringID())) + continue + } + + isApply := currSourceRequest.GetSerializedRequest().Action == manifestclient.ActionApply || currSourceRequest.GetSerializedRequest().Action == manifestclient.ActionApplyStatus + lhsMetadata := expandedMetadataFor(currSourceRequest.GetSerializedRequest()) + + found := false + mismatchReasons := []string{} + for i, currDestinationRequest := range currDestinationRequests { + if manifestclient.EquivalentSerializedRequests(currSourceRequest, currDestinationRequest) { + found = true + mismatchReasons = nil + break + } + // if we're doing an apply and the field manager doesn't match, then it's just a case of "content isn't here" versus a diff + // actions match because the metadata (which contains action) matched + if isApply { + lhsOptions := currSourceRequest.GetSerializedRequest().Options + 
rhsOptions := currDestinationRequest.GetSerializedRequest().Options + if !bytes.Equal(lhsOptions, rhsOptions) { + // if the options for apply (which contains the field manager) aren't the same, then the requests + // are logically different requests and incomparable + continue + } + } + + // we know the metadata is the same, something else doesn't match + if !bytes.Equal(currSourceRequest.GetSerializedRequest().Options, currDestinationRequest.GetSerializedRequest().Options) { + mismatchReasons = append(mismatchReasons, + fmt.Sprintf("mutation: %v, fieldManager=%v, controllerInstanceName=%v, %v[%d]: options diff: %v", + currSourceRequest.GetSerializedRequest().StringID(), + lhsMetadata.fieldManager, + lhsMetadata.controllerInstanceName, + nameOfDestination, + i, + cmp.Diff(currSourceRequest.GetSerializedRequest().Options, currDestinationRequest.GetSerializedRequest().Options), + ), + ) + } + + metadata := currSourceRequest.GetSerializedRequest().GetLookupMetadata() + if isResourceIgnoredForBodyComparison(metadata.ResourceType.GroupResource()) { + found = true + break + } + + if !bytes.Equal(currSourceRequest.GetSerializedRequest().Body, currDestinationRequest.GetSerializedRequest().Body) { + mismatchReasons = append(mismatchReasons, + fmt.Sprintf("mutation: %v, fieldManager=%v, controllerInstanceName=%v, %v[%d]: body diff: %v", + currSourceRequest.GetSerializedRequest().StringID(), + lhsMetadata.fieldManager, + lhsMetadata.controllerInstanceName, + nameOfDestination, + i, + cmp.Diff(currSourceRequest.GetSerializedRequest().Body, currDestinationRequest.GetSerializedRequest().Body), + ), + ) + } + } + if found { + continue + } + if !found && len(mismatchReasons) == 0 { + mismatchReasons = append(mismatchReasons, fmt.Sprintf("%s is missing equivalent request for fieldManager=%v controllerInstanceName=%v: %v", nameOfDestination, lhsMetadata.fieldManager, lhsMetadata.controllerInstanceName, currSourceRequest.StringID())) + } + reasons = append(reasons, 
mismatchReasons...) + } + return reasons +} + +func isResourceIgnoredForBodyComparison(gr schema.GroupResource) bool { + return gr == csrGR +} + +func isEventResource(gr schema.GroupResource) bool { + return gr == coreEventGR || gr == eventGR +} diff --git a/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/operator_launch_helpers.go b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/operator_launch_helpers.go new file mode 100644 index 000000000..c29e47990 --- /dev/null +++ b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/operator_launch_helpers.go @@ -0,0 +1,286 @@ +package libraryapplyconfiguration + +import ( + "context" + "errors" + "fmt" + "math/rand" + "reflect" + "runtime/debug" + "strings" + "time" + + "github.com/openshift/library-go/pkg/controller/factory" + "github.com/openshift/library-go/pkg/manifestclient" + "github.com/openshift/library-go/pkg/operator/events" + + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/dynamic/dynamicinformer" +) + +type OperatorStarter interface { + RunOnce(ctx context.Context, input ApplyConfigurationInput) (*ApplyConfigurationRunResult, AllDesiredMutationsGetter, error) + Start(ctx context.Context) error +} + +type SimpleOperatorStarter struct { + Informers []SimplifiedInformerFactory + ControllerNamedRunOnceFns []NamedRunOnce + // ControllerRunFns is useful during a transition to coalesce the operator launching flow. 
+ ControllerRunFns []RunFunc +} + +var ( + _ OperatorStarter = &SimpleOperatorStarter{} + _ SimplifiedInformerFactory = generatedInformerFactory{} + _ SimplifiedInformerFactory = dynamicInformerFactory{} + _ SimplifiedInformerFactory = generatedNamespacedInformerFactory{} +) + +func (a SimpleOperatorStarter) RunOnce(ctx context.Context, input ApplyConfigurationInput) (*ApplyConfigurationRunResult, AllDesiredMutationsGetter, error) { + for _, informer := range a.Informers { + informer.Start(ctx) + } + // wait for sync so that when NamedRunOnce is called the listers will be ready. + // TODO add timeout + for _, informer := range a.Informers { + informer.WaitForCacheSync(ctx) + } + + knownControllersSet := sets.NewString() + duplicateControllerNames := []string{} + for _, controllerRunner := range a.ControllerNamedRunOnceFns { + if knownControllersSet.Has(controllerRunner.ControllerInstanceName()) { + duplicateControllerNames = append(duplicateControllerNames, controllerRunner.ControllerInstanceName()) + continue + } + knownControllersSet.Insert(controllerRunner.ControllerInstanceName()) + } + if len(duplicateControllerNames) > 0 { + return nil, nil, fmt.Errorf("the following controllers were requested to run multiple times: %v", duplicateControllerNames) + } + + if errs := validateControllersFromFlags(knownControllersSet, input.Controllers); len(errs) > 0 { + return nil, nil, errors.Join(errs...) 
+ } + + allControllersRunResult := &ApplyConfigurationRunResult{} + + shuffleNamedRunOnce(a.ControllerNamedRunOnceFns) + errs := []error{} + for _, controllerRunner := range a.ControllerNamedRunOnceFns { + func() { + currControllerResult := ControllerRunResult{ + ControllerName: controllerRunner.ControllerInstanceName(), + Status: ControllerRunStatusUnknown, + } + defer func() { + if r := recover(); r != nil { + currControllerResult.Status = ControllerRunStatusPanicked + currControllerResult.PanicStack = fmt.Sprintf("%s\n%s", r, string(debug.Stack())) + } + allControllersRunResult.ControllerResults = append(allControllersRunResult.ControllerResults, currControllerResult) + }() + + if !isControllerEnabled(controllerRunner.ControllerInstanceName(), input.Controllers) { + currControllerResult.Status = ControllerRunStatusSkipped + return + } + localCtx, localCancel := context.WithTimeout(ctx, 1*time.Second) + defer localCancel() + + localCtx = manifestclient.WithControllerInstanceNameFromContext(localCtx, controllerRunner.ControllerInstanceName()) + if err := controllerRunner.RunOnce(localCtx); err != nil { + currControllerResult.Status = ControllerRunStatusFailed + currControllerResult.Errors = append(currControllerResult.Errors, ErrorDetails{Message: err.Error()}) + errs = append(errs, fmt.Errorf("controller %q failed: %w", controllerRunner.ControllerInstanceName(), err)) + } else { + currControllerResult.Status = ControllerRunStatusSucceeded + } + }() + } + + // canonicalize + CanonicalizeApplyConfigurationRunResult(allControllersRunResult) + + return allControllersRunResult, NewApplyConfigurationFromClient(input.MutationTrackingClient.GetMutations()), errors.Join(errs...) 
+} + +func (a SimpleOperatorStarter) Start(ctx context.Context) error { + for _, informer := range a.Informers { + informer.Start(ctx) + } + + for _, controllerRunFn := range a.ControllerRunFns { + go controllerRunFn(ctx) + } + return nil +} + +type SimplifiedInformerFactory interface { + Start(ctx context.Context) + WaitForCacheSync(ctx context.Context) +} + +type NamedRunOnce interface { + ControllerInstanceName() string + RunOnce(context.Context) error +} + +type namedRunOnce struct { + controllerInstanceName string + runOnce RunOnceFunc +} + +func NewNamedRunOnce(controllerInstanceName string, runOnce RunOnceFunc) *namedRunOnce { + return &namedRunOnce{ + controllerInstanceName: controllerInstanceName, + runOnce: runOnce, + } +} + +func (r *namedRunOnce) RunOnce(ctx context.Context) error { + return r.runOnce(ctx) +} + +func (r *namedRunOnce) ControllerInstanceName() string { + return r.controllerInstanceName +} + +type RunOnceFunc func(ctx context.Context) error + +type RunFunc func(ctx context.Context) + +type GeneratedInformerFactory interface { + Start(stopCh <-chan struct{}) + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool +} + +func GeneratedInformerFactoryAdapter(in GeneratedInformerFactory) SimplifiedInformerFactory { + return generatedInformerFactory{delegate: in} +} + +func DynamicInformerFactoryAdapter(in dynamicinformer.DynamicSharedInformerFactory) SimplifiedInformerFactory { + return dynamicInformerFactory{delegate: in} +} + +func GeneratedNamespacedInformerFactoryAdapter(in GeneratedNamespacedInformerFactory) SimplifiedInformerFactory { + return generatedNamespacedInformerFactory{delegate: in} +} + +func AdaptRunFn(fn func(ctx context.Context, workers int)) RunFunc { + return func(ctx context.Context) { + fn(ctx, 1) + } +} + +func AdaptSyncFn(eventRecorder events.Recorder, controllerName string, originalRunOnce func(ctx context.Context, syncCtx factory.SyncContext) error) NamedRunOnce { + return NewNamedRunOnce(controllerName, 
func(ctx context.Context) error { + syncCtx := factory.NewSyncContext("run-once-sync-context", eventRecorder) + return originalRunOnce(ctx, syncCtx) + }) +} + +type Syncer interface { + Sync(ctx context.Context, controllerContext factory.SyncContext) error +} + +type ControllerWithInstanceName interface { + ControllerInstanceName() string +} + +func AdaptNamedController(eventRecorder events.Recorder, controller Syncer) NamedRunOnce { + controllerWithInstanceName, ok := controller.(ControllerWithInstanceName) + if !ok { + panic(fmt.Sprintf("%T doesn't expose ControllerInstanceName() method which is required", controller)) + } + controllerInstanceName := controllerWithInstanceName.ControllerInstanceName() + if len(controllerInstanceName) == 0 { + panic(fmt.Sprintf("%T cannot return an empty ControllerInstanceName", controller)) + } + + return NewNamedRunOnce(controllerInstanceName, func(ctx context.Context) error { + syncCtx := factory.NewSyncContext("run-named-once-sync-context", eventRecorder) + return controller.Sync(ctx, syncCtx) + }) +} + +type generatedInformerFactory struct { + delegate GeneratedInformerFactory +} + +func (g generatedInformerFactory) Start(ctx context.Context) { + g.delegate.Start(ctx.Done()) +} + +func (g generatedInformerFactory) WaitForCacheSync(ctx context.Context) { + g.delegate.WaitForCacheSync(ctx.Done()) +} + +type dynamicInformerFactory struct { + delegate dynamicinformer.DynamicSharedInformerFactory +} + +func (g dynamicInformerFactory) Start(ctx context.Context) { + g.delegate.Start(ctx.Done()) +} + +func (g dynamicInformerFactory) WaitForCacheSync(ctx context.Context) { + g.delegate.WaitForCacheSync(ctx.Done()) +} + +type GeneratedNamespacedInformerFactory interface { + Start(stopCh <-chan struct{}) + WaitForCacheSync(stopCh <-chan struct{}) map[string]map[reflect.Type]bool +} + +type generatedNamespacedInformerFactory struct { + delegate GeneratedNamespacedInformerFactory +} + +func (g generatedNamespacedInformerFactory) Start(ctx 
context.Context) { + g.delegate.Start(ctx.Done()) +} + +func (g generatedNamespacedInformerFactory) WaitForCacheSync(ctx context.Context) { + g.delegate.WaitForCacheSync(ctx.Done()) +} + +func shuffleNamedRunOnce(controllers []NamedRunOnce) { + rand.Shuffle(len(controllers), func(i, j int) { + controllers[i], controllers[j] = controllers[j], controllers[i] + }) +} + +func isControllerEnabled(name string, controllers []string) bool { + hasStar := false + for _, ctrl := range controllers { + if ctrl == name { + return true + } + if ctrl == "-"+name { + return false + } + if ctrl == "*" { + hasStar = true + } + } + + return hasStar +} + +func validateControllersFromFlags(allKnownControllersSet sets.String, controllersToRunFromFlags []string) []error { + var errs []error + for _, initialName := range controllersToRunFromFlags { + if initialName == "*" { + continue + } + initialNameWithoutPrefix := strings.TrimPrefix(initialName, "-") + controllerName := initialNameWithoutPrefix + if !allKnownControllersSet.Has(controllerName) { + errs = append(errs, fmt.Errorf("%q is not in the list of known controllers", initialNameWithoutPrefix)) + } + } + + return errs +} diff --git a/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/options.go b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/options.go new file mode 100644 index 000000000..40a1ba5e3 --- /dev/null +++ b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/options.go @@ -0,0 +1,83 @@ +package libraryapplyconfiguration + +import ( + "context" + "errors" + "fmt" + "github.com/openshift/library-go/pkg/manifestclient" + "os" + "path/filepath" + "sigs.k8s.io/yaml" + + "github.com/openshift/multi-operator-manager/pkg/library/libraryoutputresources" +) + +type applyConfigurationOptions struct { + applyConfigurationFn ApplyConfigurationFunc + outputResourcesFn libraryoutputresources.OutputResourcesFunc + + 
input ApplyConfigurationInput + + outputDirectory string +} + +func newApplyConfigurationOptions( + applyConfigurationFn ApplyConfigurationFunc, + outputResourcesFn libraryoutputresources.OutputResourcesFunc, + input ApplyConfigurationInput, + outputDirectory string) *applyConfigurationOptions { + return &applyConfigurationOptions{ + applyConfigurationFn: applyConfigurationFn, + outputResourcesFn: outputResourcesFn, + input: input, + outputDirectory: outputDirectory, + } +} + +func (o *applyConfigurationOptions) Run(ctx context.Context) error { + if err := os.MkdirAll(o.outputDirectory, 0755); err != nil && !os.IsExist(err) { + return fmt.Errorf("unable to create output directory %q:%v", o.outputDirectory, err) + } + + errs := []error{} + allAllowedOutputResources, err := o.outputResourcesFn(ctx) + if err != nil { + errs = append(errs, err) + } + + controllerResults, mutations, err := o.applyConfigurationFn(ctx, o.input) + if err != nil { + errs = append(errs, err) + } + + // also validate the raw results because filtering may have eliminated "bad" output. 
+ unspecifiedOutputResources := UnspecifiedOutputResources(mutations, allAllowedOutputResources) + if err := ValidateAllDesiredMutationsGetter(mutations, allAllowedOutputResources); err != nil { + errs = append(errs, err) + } + + // now filter the results and check them + filteredResult := FilterAllDesiredMutationsGetter(mutations, allAllowedOutputResources) + if err := ValidateAllDesiredMutationsGetter(filteredResult, allAllowedOutputResources); err != nil { + errs = append(errs, err) + } + + if err := WriteApplyConfiguration(filteredResult, o.outputDirectory); err != nil { + errs = append(errs, err) + } + if len(unspecifiedOutputResources) > 0 { + if err := manifestclient.WriteMutationDirectory(filepath.Join(o.outputDirectory, "Unspecified"), unspecifiedOutputResources...); err != nil { + errs = append(errs, err) + } + } + + if controllerResultBytes, err := yaml.Marshal(controllerResults); err != nil { + errs = append(errs, fmt.Errorf("failed marshalling controller results: %w", err)) + } else { + if err := os.WriteFile(filepath.Join(o.outputDirectory, "controller-results.yaml"), controllerResultBytes, 0644); err != nil { + errs = append(errs, fmt.Errorf("failed writing controller results: %w", err)) + } + } + + return errors.Join(errs...) 
+} diff --git a/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/resources.go b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/resources.go new file mode 100644 index 000000000..5721abc40 --- /dev/null +++ b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/resources.go @@ -0,0 +1,9 @@ +package libraryapplyconfiguration + +import "k8s.io/apimachinery/pkg/runtime/schema" + +var ( + coreEventGR = schema.GroupResource{Group: "", Resource: "events"} + eventGR = schema.GroupResource{Group: "events.k8s.io", Resource: "events"} + csrGR = schema.GroupResource{Group: "certificates.k8s.io", Resource: "certificatesigningrequests"} +) diff --git a/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/types.go b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/types.go new file mode 100644 index 000000000..d920a2dbd --- /dev/null +++ b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/types.go @@ -0,0 +1,54 @@ +package libraryapplyconfiguration + +import ( + "slices" + "strings" +) + +type ApplyConfigurationRunResult struct { + ControllerResults []ControllerRunResult `json:"controllerResults"` +} + +type ControllerRunResult struct { + ControllerName string `json:"controllerName"` + Status ControllerRunStatus `json:"status"` + Errors []ErrorDetails `json:"errors,omitempty"` + PanicStack string `json:"panicStack,omitempty"` +} + +type ControllerRunStatus string + +var ( + ControllerRunStatusUnknown ControllerRunStatus = "Unknown" + ControllerRunStatusSucceeded ControllerRunStatus = "Succeeded" + ControllerRunStatusSkipped ControllerRunStatus = "Skipped" + ControllerRunStatusFailed ControllerRunStatus = "Failed" + ControllerRunStatusPanicked ControllerRunStatus = "Panicked" +) + +// TODO perhaps we add indications about interfaces this matches? 
+type ErrorDetails struct { + Message string `json:"message"` +} + +func CanonicalizeApplyConfigurationRunResult(obj *ApplyConfigurationRunResult) { + if obj == nil { + return + } + slices.SortStableFunc(obj.ControllerResults, sortControllerRunResult) +} + +// TODO sort with error details +func sortControllerRunResult(a, b ControllerRunResult) int { + if c := strings.Compare(a.ControllerName, b.ControllerName); c != 0 { + return c + } + if c := strings.Compare(string(a.Status), string(b.Status)); c != 0 { + return c + } + if c := strings.Compare(a.PanicStack, b.PanicStack); c != 0 { + return c + } + + return 0 +} diff --git a/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/write.go b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/write.go new file mode 100644 index 000000000..6a9dee015 --- /dev/null +++ b/vendor/github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration/write.go @@ -0,0 +1 @@ +package libraryapplyconfiguration diff --git a/vendor/k8s.io/utils/clock/testing/fake_clock.go b/vendor/k8s.io/utils/clock/testing/fake_clock.go new file mode 100644 index 000000000..9503690be --- /dev/null +++ b/vendor/k8s.io/utils/clock/testing/fake_clock.go @@ -0,0 +1,374 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package testing + +import ( + "sync" + "time" + + "k8s.io/utils/clock" +) + +var ( + _ = clock.PassiveClock(&FakePassiveClock{}) + _ = clock.WithTicker(&FakeClock{}) + _ = clock.Clock(&IntervalClock{}) +) + +// FakePassiveClock implements PassiveClock, but returns an arbitrary time. +type FakePassiveClock struct { + lock sync.RWMutex + time time.Time +} + +// FakeClock implements clock.Clock, but returns an arbitrary time. +type FakeClock struct { + FakePassiveClock + + // waiters are waiting for the fake time to pass their specified time + waiters []*fakeClockWaiter +} + +type fakeClockWaiter struct { + targetTime time.Time + stepInterval time.Duration + skipIfBlocked bool + destChan chan time.Time + afterFunc func() +} + +// NewFakePassiveClock returns a new FakePassiveClock. +func NewFakePassiveClock(t time.Time) *FakePassiveClock { + return &FakePassiveClock{ + time: t, + } +} + +// NewFakeClock constructs a fake clock set to the provided time. +func NewFakeClock(t time.Time) *FakeClock { + return &FakeClock{ + FakePassiveClock: *NewFakePassiveClock(t), + } +} + +// Now returns f's time. +func (f *FakePassiveClock) Now() time.Time { + f.lock.RLock() + defer f.lock.RUnlock() + return f.time +} + +// Since returns time since the time in f. +func (f *FakePassiveClock) Since(ts time.Time) time.Duration { + f.lock.RLock() + defer f.lock.RUnlock() + return f.time.Sub(ts) +} + +// SetTime sets the time on the FakePassiveClock. +func (f *FakePassiveClock) SetTime(t time.Time) { + f.lock.Lock() + defer f.lock.Unlock() + f.time = t +} + +// After is the fake version of time.After(d). +func (f *FakeClock) After(d time.Duration) <-chan time.Time { + f.lock.Lock() + defer f.lock.Unlock() + stopTime := f.time.Add(d) + ch := make(chan time.Time, 1) // Don't block! + f.waiters = append(f.waiters, &fakeClockWaiter{ + targetTime: stopTime, + destChan: ch, + }) + return ch +} + +// NewTimer constructs a fake timer, akin to time.NewTimer(d). 
+func (f *FakeClock) NewTimer(d time.Duration) clock.Timer { + f.lock.Lock() + defer f.lock.Unlock() + stopTime := f.time.Add(d) + ch := make(chan time.Time, 1) // Don't block! + timer := &fakeTimer{ + fakeClock: f, + waiter: fakeClockWaiter{ + targetTime: stopTime, + destChan: ch, + }, + } + f.waiters = append(f.waiters, &timer.waiter) + return timer +} + +// AfterFunc is the Fake version of time.AfterFunc(d, cb). +func (f *FakeClock) AfterFunc(d time.Duration, cb func()) clock.Timer { + f.lock.Lock() + defer f.lock.Unlock() + stopTime := f.time.Add(d) + ch := make(chan time.Time, 1) // Don't block! + + timer := &fakeTimer{ + fakeClock: f, + waiter: fakeClockWaiter{ + targetTime: stopTime, + destChan: ch, + afterFunc: cb, + }, + } + f.waiters = append(f.waiters, &timer.waiter) + return timer +} + +// Tick constructs a fake ticker, akin to time.Tick +func (f *FakeClock) Tick(d time.Duration) <-chan time.Time { + if d <= 0 { + return nil + } + f.lock.Lock() + defer f.lock.Unlock() + tickTime := f.time.Add(d) + ch := make(chan time.Time, 1) // hold one tick + f.waiters = append(f.waiters, &fakeClockWaiter{ + targetTime: tickTime, + stepInterval: d, + skipIfBlocked: true, + destChan: ch, + }) + + return ch +} + +// NewTicker returns a new Ticker. +func (f *FakeClock) NewTicker(d time.Duration) clock.Ticker { + f.lock.Lock() + defer f.lock.Unlock() + tickTime := f.time.Add(d) + ch := make(chan time.Time, 1) // hold one tick + f.waiters = append(f.waiters, &fakeClockWaiter{ + targetTime: tickTime, + stepInterval: d, + skipIfBlocked: true, + destChan: ch, + }) + + return &fakeTicker{ + c: ch, + } +} + +// Step moves the clock by Duration and notifies anyone that's called After, +// Tick, or NewTimer. +func (f *FakeClock) Step(d time.Duration) { + f.lock.Lock() + defer f.lock.Unlock() + f.setTimeLocked(f.time.Add(d)) +} + +// SetTime sets the time. 
+func (f *FakeClock) SetTime(t time.Time) { + f.lock.Lock() + defer f.lock.Unlock() + f.setTimeLocked(t) +} + +// Actually changes the time and checks any waiters. f must be write-locked. +func (f *FakeClock) setTimeLocked(t time.Time) { + f.time = t + newWaiters := make([]*fakeClockWaiter, 0, len(f.waiters)) + for i := range f.waiters { + w := f.waiters[i] + if !w.targetTime.After(t) { + if w.skipIfBlocked { + select { + case w.destChan <- t: + default: + } + } else { + w.destChan <- t + } + + if w.afterFunc != nil { + w.afterFunc() + } + + if w.stepInterval > 0 { + for !w.targetTime.After(t) { + w.targetTime = w.targetTime.Add(w.stepInterval) + } + newWaiters = append(newWaiters, w) + } + + } else { + newWaiters = append(newWaiters, f.waiters[i]) + } + } + f.waiters = newWaiters +} + +// HasWaiters returns true if Waiters() returns non-0 (so you can write race-free tests). +func (f *FakeClock) HasWaiters() bool { + f.lock.RLock() + defer f.lock.RUnlock() + return len(f.waiters) > 0 +} + +// Waiters returns the number of "waiters" on the clock (so you can write race-free +// tests). A waiter exists for: +// - every call to After that has not yet signaled its channel. +// - every call to AfterFunc that has not yet called its callback. +// - every timer created with NewTimer which is currently ticking. +// - every ticker created with NewTicker which is currently ticking. +// - every ticker created with Tick. +func (f *FakeClock) Waiters() int { + f.lock.RLock() + defer f.lock.RUnlock() + return len(f.waiters) +} + +// Sleep is akin to time.Sleep +func (f *FakeClock) Sleep(d time.Duration) { + f.Step(d) +} + +// IntervalClock implements clock.PassiveClock, but each invocation of Now steps the clock forward the specified duration. +// IntervalClock technically implements the other methods of clock.Clock, but each implementation is just a panic. +// +// Deprecated: See SimpleIntervalClock for an alternative that only has the methods of PassiveClock. 
+type IntervalClock struct { + Time time.Time + Duration time.Duration +} + +// Now returns i's time. +func (i *IntervalClock) Now() time.Time { + i.Time = i.Time.Add(i.Duration) + return i.Time +} + +// Since returns time since the time in i. +func (i *IntervalClock) Since(ts time.Time) time.Duration { + return i.Time.Sub(ts) +} + +// After is unimplemented, will panic. +// TODO: make interval clock use FakeClock so this can be implemented. +func (*IntervalClock) After(d time.Duration) <-chan time.Time { + panic("IntervalClock doesn't implement After") +} + +// NewTimer is unimplemented, will panic. +// TODO: make interval clock use FakeClock so this can be implemented. +func (*IntervalClock) NewTimer(d time.Duration) clock.Timer { + panic("IntervalClock doesn't implement NewTimer") +} + +// AfterFunc is unimplemented, will panic. +// TODO: make interval clock use FakeClock so this can be implemented. +func (*IntervalClock) AfterFunc(d time.Duration, f func()) clock.Timer { + panic("IntervalClock doesn't implement AfterFunc") +} + +// Tick is unimplemented, will panic. +// TODO: make interval clock use FakeClock so this can be implemented. +func (*IntervalClock) Tick(d time.Duration) <-chan time.Time { + panic("IntervalClock doesn't implement Tick") +} + +// NewTicker has no implementation yet and is omitted. +// TODO: make interval clock use FakeClock so this can be implemented. +func (*IntervalClock) NewTicker(d time.Duration) clock.Ticker { + panic("IntervalClock doesn't implement NewTicker") +} + +// Sleep is unimplemented, will panic. +func (*IntervalClock) Sleep(d time.Duration) { + panic("IntervalClock doesn't implement Sleep") +} + +var _ = clock.Timer(&fakeTimer{}) + +// fakeTimer implements clock.Timer based on a FakeClock. +type fakeTimer struct { + fakeClock *FakeClock + waiter fakeClockWaiter +} + +// C returns the channel that notifies when this timer has fired. 
+func (f *fakeTimer) C() <-chan time.Time { + return f.waiter.destChan +} + +// Stop prevents the Timer from firing. It returns true if the call stops the +// timer, false if the timer has already expired or been stopped. +func (f *fakeTimer) Stop() bool { + f.fakeClock.lock.Lock() + defer f.fakeClock.lock.Unlock() + + active := false + newWaiters := make([]*fakeClockWaiter, 0, len(f.fakeClock.waiters)) + for i := range f.fakeClock.waiters { + w := f.fakeClock.waiters[i] + if w != &f.waiter { + newWaiters = append(newWaiters, w) + continue + } + // If timer is found, it has not been fired yet. + active = true + } + + f.fakeClock.waiters = newWaiters + + return active +} + +// Reset changes the timer to expire after duration d. It returns true if the +// timer had been active, false if the timer had expired or been stopped. +func (f *fakeTimer) Reset(d time.Duration) bool { + f.fakeClock.lock.Lock() + defer f.fakeClock.lock.Unlock() + + active := false + + f.waiter.targetTime = f.fakeClock.time.Add(d) + + for i := range f.fakeClock.waiters { + w := f.fakeClock.waiters[i] + if w == &f.waiter { + // If timer is found, it has not been fired yet. + active = true + break + } + } + if !active { + f.fakeClock.waiters = append(f.fakeClock.waiters, &f.waiter) + } + + return active +} + +type fakeTicker struct { + c <-chan time.Time +} + +func (t *fakeTicker) C() <-chan time.Time { + return t.c +} + +func (t *fakeTicker) Stop() { +} diff --git a/vendor/k8s.io/utils/clock/testing/simple_interval_clock.go b/vendor/k8s.io/utils/clock/testing/simple_interval_clock.go new file mode 100644 index 000000000..951ca4d17 --- /dev/null +++ b/vendor/k8s.io/utils/clock/testing/simple_interval_clock.go @@ -0,0 +1,44 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testing + +import ( + "time" + + "k8s.io/utils/clock" +) + +var ( + _ = clock.PassiveClock(&SimpleIntervalClock{}) +) + +// SimpleIntervalClock implements clock.PassiveClock, but each invocation of Now steps the clock forward the specified duration +type SimpleIntervalClock struct { + Time time.Time + Duration time.Duration +} + +// Now returns i's time. +func (i *SimpleIntervalClock) Now() time.Time { + i.Time = i.Time.Add(i.Duration) + return i.Time +} + +// Since returns time since the time in i. +func (i *SimpleIntervalClock) Since(ts time.Time) time.Duration { + return i.Time.Sub(ts) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 8971d9f6f..5bdbd5722 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -405,6 +405,8 @@ github.com/openshift/library-go/test/library github.com/openshift/library-go/test/library/metrics # github.com/openshift/multi-operator-manager v0.0.0-20250930141021-05cb0b9abdb4 ## explicit; go 1.24.0 +github.com/openshift/multi-operator-manager/pkg/flagtypes +github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration github.com/openshift/multi-operator-manager/pkg/library/libraryinputresources github.com/openshift/multi-operator-manager/pkg/library/libraryoutputresources # github.com/pkg/errors v0.9.1 @@ -1508,6 +1510,7 @@ k8s.io/kube-openapi/pkg/validation/strfmt/bson ## explicit; go 1.18 k8s.io/utils/buffer k8s.io/utils/clock +k8s.io/utils/clock/testing k8s.io/utils/internal/third_party/forked/golang/golang-lru k8s.io/utils/internal/third_party/forked/golang/net k8s.io/utils/lru From 
275b248b17a4b6d6a1b7a059ecd6de15e608a813 Mon Sep 17 00:00:00 2001 From: Fabio Bertinatto Date: Fri, 28 Nov 2025 15:10:47 -0300 Subject: [PATCH 7/8] Add OM apply-configuration command stub Generated with Claude Code --- .../main.go | 1 + pkg/cmd/mom/apply_configuration_command.go | 47 +++++++++++++++++++ 2 files changed, 48 insertions(+) create mode 100644 pkg/cmd/mom/apply_configuration_command.go diff --git a/cmd/cluster-kube-controller-manager-operator/main.go b/cmd/cluster-kube-controller-manager-operator/main.go index 5aaf5634b..2d350cec0 100644 --- a/cmd/cluster-kube-controller-manager-operator/main.go +++ b/cmd/cluster-kube-controller-manager-operator/main.go @@ -48,6 +48,7 @@ func NewSSCSCommand(ctx context.Context) *cobra.Command { cmd.AddCommand(recoverycontroller.NewCertRecoveryControllerCommand(ctx)) cmd.AddCommand(mom.NewInputResourcesCommand(ioStreams)) cmd.AddCommand(mom.NewOutputResourcesCommand(ioStreams)) + cmd.AddCommand(mom.NewApplyConfigurationCommand(ioStreams)) return cmd } diff --git a/pkg/cmd/mom/apply_configuration_command.go b/pkg/cmd/mom/apply_configuration_command.go new file mode 100644 index 000000000..f915967a5 --- /dev/null +++ b/pkg/cmd/mom/apply_configuration_command.go @@ -0,0 +1,47 @@ +package mom + +import ( + "context" + "fmt" + + "github.com/openshift/multi-operator-manager/pkg/library/libraryapplyconfiguration" + "github.com/spf13/cobra" + "k8s.io/cli-runtime/pkg/genericiooptions" +) + +func NewApplyConfigurationCommand(streams genericiooptions.IOStreams) *cobra.Command { + return libraryapplyconfiguration.NewApplyConfigurationCommand(RunApplyConfiguration, runOutputResources, streams) +} + +func RunApplyConfiguration(ctx context.Context, input libraryapplyconfiguration.ApplyConfigurationInput) (*libraryapplyconfiguration.ApplyConfigurationRunResult, libraryapplyconfiguration.AllDesiredMutationsGetter, error) { + // TODO: Implement operator reconciliation logic + // + // The manifestclient (input.ManagementClient) is a 
drop-in replacement for standard k8s clients. + // Pass it to your operator and run sync logic ONCE (not in a loop). + // + // Implementation steps: + // 1. Create operator client using input.ManagementClient (manifestclient) + // 2. Create informers from the manifestclient + // 3. Initialize the operator with these clients + // 4. Run sync logic ONCE (not in a control loop) + // 5. Return the result + // + // Example pattern: + // operatorClient, dynamicInformers, err := genericoperatorclient.NewStaticPodOperatorClient(...) + // if err != nil { return nil, nil, err } + // + // // Create controllers with manifestclient-based informers + // // Run sync once (not Start()) + // // Return result + // + // Reference implementation: + // github.com/openshift/cluster-authentication-operator/pkg/cmd/mom/apply_configuration_command.go + // + // Key considerations: + // - Use input.ManagementClient instead of real k8s client + // - Use input.ManagementEventRecorder for events + // - Run sync ONCE, not in a loop + // - The manifestclient reads from input directory and writes to output directory + + return nil, nil, fmt.Errorf("apply-configuration not yet implemented - see TODO comments above for implementation guidance") +} From 8fb3507c69f030199c100a018424e8ac05254352 Mon Sep 17 00:00:00 2001 From: Fabio Bertinatto Date: Fri, 28 Nov 2025 15:11:23 -0300 Subject: [PATCH 8/8] Add OM test infrastructure Generated with Claude Code --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index d121ac53e..7d2f83dc4 100644 --- a/Makefile +++ b/Makefile @@ -7,6 +7,7 @@ include $(addprefix ./vendor/github.com/openshift/build-machinery-go/make/, \ targets/openshift/images.mk \ targets/openshift/deps.mk \ targets/openshift/operator/telepresence.mk \ + targets/openshift/operator/mom.mk \ ) # Exclude e2e tests from unit testing